Merge tag 'drm-msm-next-2020-09-27' of https://gitlab.freedesktop.org/drm/msm into drm-next

* DSI support for sm8150/sm8250
* Support for per-process GPU pagetables (finally!) for a6xx;
  a condensed sketch of the flow follows below.  Some iommu/arm-smmu
  changes are still required to enable it; without them, the driver
  falls back to the current single-pgtable state.  The first part
  (i.e. what doesn't depend on drm side patches) is queued up for
  v5.10[1].
* DisplayPort support.  Userspace DP compliance tool support is
  already merged in IGT[2].
* The usual assortment of smaller fixes/cleanups

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rob Clark <robdclark@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/CAF6AEGvqjuzH=Po_9EzzFsp2Xq3tqJUTKfsA2g09XY7_+6Ypfw@mail.gmail.com
Commit 91d0ca3d6b by Dave Airlie, 2020-09-29 10:18:49 +10:00
89 changed files with 13074 additions and 774 deletions
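For orientation, here is the per-process pagetable flow in miniature, pieced together from the a6xx hunks below (a condensed sketch; error handling and the submit plumbing are trimmed):

/* Each DRM file can get its own address space, backed by a pagetable
 * cloned from the GPU's global SMMU domain (from a6xx_gpu.c below): */
static struct msm_gem_address_space *
a6xx_create_private_address_space(struct msm_gpu *gpu)
{
        struct msm_mmu *mmu;

        /* Without the pending iommu/arm-smmu changes this fails and
         * the driver keeps using the single global pagetable: */
        mmu = msm_iommu_pagetable_create(gpu->aspace->mmu);
        if (IS_ERR(mmu))
                return ERR_CAST(mmu);

        return msm_gem_address_space_create(mmu,
                "gpu", 0x100000000ULL, 0x1ffffffffULL);
}

/* On each submit, a6xx_set_pagetable() (also below) emits a
 * CP_SMMU_TABLE_UPDATE packet so the CP rewrites TTBR0/ASID to the
 * submitting context's pagetable, skips the switch when the context
 * is unchanged, and flushes the UCHE afterwards. */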


@ -90,6 +90,8 @@ Required properties:
* "qcom,dsi-phy-14nm-660"
* "qcom,dsi-phy-10nm"
* "qcom,dsi-phy-10nm-8998"
* "qcom,dsi-phy-7nm"
* "qcom,dsi-phy-7nm-8150"
- reg: Physical base address and length of the registers of PLL, PHY. Some
revisions require the PHY regulator base address, whereas others require the
PHY lane base address. See below for each PHY revision.
@ -98,7 +100,7 @@ Required properties:
* "dsi_pll"
* "dsi_phy"
* "dsi_phy_regulator"
For DSI 14nm and 10nm PHYs:
For DSI 14nm, 10nm and 7nm PHYs:
* "dsi_pll"
* "dsi_phy"
* "dsi_phy_lane"
@ -116,7 +118,7 @@ Required properties:
- vcca-supply: phandle to vcca regulator device node
For 14nm PHY:
- vcca-supply: phandle to vcca regulator device node
For 10nm PHY:
For 10nm and 7nm PHY:
- vdds-supply: phandle to vdds regulator device node
Optional properties:
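For illustration, a hypothetical node using the new 7nm PHY compatible could look like the following; the unit address, register windows and regulator phandle are invented for the example, and only the compatible string, the reg-names and the vdds-supply property come from the binding above:

dsi_phy0: dsi-phy@ae94400 {
        compatible = "qcom,dsi-phy-7nm";
        /* PLL, PHY and PHY lane register windows (addresses made up) */
        reg = <0x0ae94a00 0x1c0>,
              <0x0ae94400 0x200>,
              <0x0ae94500 0x280>;
        reg-names = "dsi_pll",
                    "dsi_phy",
                    "dsi_phy_lane";
        vdds-supply = <&vreg_l5a_0p88>;
};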


@ -8168,7 +8168,7 @@ static void compute_m_n(unsigned int m, unsigned int n,
* which the devices expect also in synchronous clock mode.
*/
if (constant_n)
*ret_n = 0x8000;
*ret_n = DP_LINK_CONSTANT_N_VALUE;
else
*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);


@ -6,8 +6,8 @@ config DRM_MSM
depends on ARCH_QCOM || SOC_IMX5 || (ARM && COMPILE_TEST)
depends on OF && COMMON_CLK
depends on MMU
depends on INTERCONNECT || !INTERCONNECT
depends on QCOM_OCMEM || QCOM_OCMEM=n
select IOMMU_IO_PGTABLE
select QCOM_MDT_LOADER if ARCH_QCOM
select REGULATOR
select DRM_KMS_HELPER
@ -57,6 +57,15 @@ config DRM_MSM_HDMI_HDCP
help
Choose this option to enable HDCP state machine
config DRM_MSM_DP
bool "Enable DisplayPort support in MSM DRM driver"
depends on DRM_MSM
default y
help
Compile in support for the DP driver in the MSM DRM driver. This
option enables external DisplayPort display support; the DP display
can be the primary or secondary display on the device.
config DRM_MSM_DSI
bool "Enable DSI support in MSM DRM driver"
depends on DRM_MSM
@ -110,3 +119,11 @@ config DRM_MSM_DSI_10NM_PHY
default y
help
Choose this option if DSI PHY on SDM845 is used on the platform.
config DRM_MSM_DSI_7NM_PHY
bool "Enable DSI 7nm PHY driver in MSM DRM (used by SM8150/SM8250)"
depends on DRM_MSM_DSI
default y
help
Choose this option if DSI PHY on SM8150/SM8250 is used on the
platform.


@ -2,6 +2,7 @@
ccflags-y := -I $(srctree)/$(src)
ccflags-y += -I $(srctree)/$(src)/disp/dpu1
ccflags-$(CONFIG_DRM_MSM_DSI) += -I $(srctree)/$(src)/dsi
ccflags-$(CONFIG_DRM_MSM_DP) += -I $(srctree)/$(src)/dp
msm-y := \
adreno/adreno_device.o \
@ -95,10 +96,23 @@ msm-y := \
msm_gpu_tracepoints.o \
msm_gpummu.o
msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o
msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o \
dp/dp_debug.o
msm-$(CONFIG_DRM_MSM_GPU_STATE) += adreno/a6xx_gpu_state.o
msm-$(CONFIG_DRM_MSM_DP)+= dp/dp_aux.o \
dp/dp_catalog.o \
dp/dp_ctrl.o \
dp/dp_display.o \
dp/dp_drm.o \
dp/dp_hpd.o \
dp/dp_link.o \
dp/dp_panel.o \
dp/dp_parser.o \
dp/dp_power.o \
dp/dp_audio.o
msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
msm-$(CONFIG_COMMON_CLK) += disp/mdp4/mdp4_lvds_pll.o
msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_pll_8960.o
@ -119,6 +133,7 @@ msm-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o
msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/phy/dsi_phy_28nm_8960.o
msm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/phy/dsi_phy_14nm.o
msm-$(CONFIG_DRM_MSM_DSI_10NM_PHY) += dsi/phy/dsi_phy_10nm.o
msm-$(CONFIG_DRM_MSM_DSI_7NM_PHY) += dsi/phy/dsi_phy_7nm.o
ifeq ($(CONFIG_DRM_MSM_DSI_PLL),y)
msm-y += dsi/pll/dsi_pll.o
@ -126,6 +141,7 @@ msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/pll/dsi_pll_28nm.o
msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/pll/dsi_pll_28nm_8960.o
msm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/pll/dsi_pll_14nm.o
msm-$(CONFIG_DRM_MSM_DSI_10NM_PHY) += dsi/pll/dsi_pll_10nm.o
msm-$(CONFIG_DRM_MSM_DSI_7NM_PHY) += dsi/pll/dsi_pll_7nm.o
endif
obj-$(CONFIG_DRM_MSM) += msm.o


@ -10,6 +10,48 @@ extern bool hang_debug;
static void a2xx_dump(struct msm_gpu *gpu);
static bool a2xx_idle(struct msm_gpu *gpu);
static void a2xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
struct msm_drm_private *priv = gpu->dev->dev_private;
struct msm_ringbuffer *ring = submit->ring;
unsigned int i;
for (i = 0; i < submit->nr_cmds; i++) {
switch (submit->cmd[i].type) {
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
/* ignore IB-targets */
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
/* ignore if there has not been a ctx switch: */
if (priv->lastctx == submit->queue->ctx)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFD, 2);
OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
OUT_RING(ring, submit->cmd[i].size);
OUT_PKT2(ring);
break;
}
}
OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
OUT_RING(ring, submit->seqno);
/* wait for idle before cache flush/interrupt */
OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
OUT_RING(ring, 0x00000000);
OUT_PKT3(ring, CP_EVENT_WRITE, 3);
OUT_RING(ring, CACHE_FLUSH_TS);
OUT_RING(ring, rbmemptr(ring, fence));
OUT_RING(ring, submit->seqno);
OUT_PKT3(ring, CP_INTERRUPT, 1);
OUT_RING(ring, 0x80000000);
adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
}
static bool a2xx_me_init(struct msm_gpu *gpu)
{
struct msm_ringbuffer *ring = gpu->rb[0];
@ -53,7 +95,7 @@ static bool a2xx_me_init(struct msm_gpu *gpu)
OUT_PKT3(ring, CP_SET_PROTECTED_MODE, 1);
OUT_RING(ring, 1);
gpu->funcs->flush(gpu, ring);
adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
return a2xx_idle(gpu);
}
@ -421,16 +463,11 @@ a2xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
return aspace;
}
/* Register offset defines for A2XX - copy of A3XX */
static const unsigned int a2xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE),
REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_AXXX_CP_RB_RPTR_ADDR),
REG_ADRENO_SKIP(REG_ADRENO_CP_RB_RPTR_ADDR_HI),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_AXXX_CP_RB_RPTR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_AXXX_CP_RB_WPTR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_AXXX_CP_RB_CNTL),
};
static u32 a2xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
ring->memptrs->rptr = gpu_read(gpu, REG_AXXX_CP_RB_RPTR);
return ring->memptrs->rptr;
}
static const struct adreno_gpu_funcs funcs = {
.base = {
@ -439,8 +476,7 @@ static const struct adreno_gpu_funcs funcs = {
.pm_suspend = msm_gpu_pm_suspend,
.pm_resume = msm_gpu_pm_resume,
.recover = a2xx_recover,
.submit = adreno_submit,
.flush = adreno_flush,
.submit = a2xx_submit,
.active_ring = adreno_active_ring,
.irq = a2xx_irq,
.destroy = a2xx_destroy,
@ -450,6 +486,7 @@ static const struct adreno_gpu_funcs funcs = {
.gpu_state_get = a2xx_gpu_state_get,
.gpu_state_put = adreno_gpu_state_put,
.create_address_space = a2xx_create_address_space,
.get_rptr = a2xx_get_rptr,
},
};
@ -491,8 +528,6 @@ struct msm_gpu *a2xx_gpu_init(struct drm_device *dev)
else
adreno_gpu->registers = a220_registers;
adreno_gpu->reg_offsets = a2xx_register_offsets;
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
if (ret)
goto fail;


@ -28,6 +28,61 @@ extern bool hang_debug;
static void a3xx_dump(struct msm_gpu *gpu);
static bool a3xx_idle(struct msm_gpu *gpu);
static void a3xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
struct msm_drm_private *priv = gpu->dev->dev_private;
struct msm_ringbuffer *ring = submit->ring;
unsigned int i;
for (i = 0; i < submit->nr_cmds; i++) {
switch (submit->cmd[i].type) {
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
/* ignore IB-targets */
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
/* ignore if there has not been a ctx switch: */
if (priv->lastctx == submit->queue->ctx)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFD, 2);
OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
OUT_RING(ring, submit->cmd[i].size);
OUT_PKT2(ring);
break;
}
}
OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
OUT_RING(ring, submit->seqno);
/* Flush HLSQ lazy updates to make sure there is nothing
* pending for indirect loads after the timestamp has
* passed:
*/
OUT_PKT3(ring, CP_EVENT_WRITE, 1);
OUT_RING(ring, HLSQ_FLUSH);
/* wait for idle before cache flush/interrupt */
OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
OUT_RING(ring, 0x00000000);
/* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
OUT_PKT3(ring, CP_EVENT_WRITE, 3);
OUT_RING(ring, CACHE_FLUSH_TS | BIT(31));
OUT_RING(ring, rbmemptr(ring, fence));
OUT_RING(ring, submit->seqno);
#if 0
/* Dummy set-constant to trigger context rollover */
OUT_PKT3(ring, CP_SET_CONSTANT, 2);
OUT_RING(ring, CP_REG(REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG));
OUT_RING(ring, 0x00000000);
#endif
adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
}
static bool a3xx_me_init(struct msm_gpu *gpu)
{
struct msm_ringbuffer *ring = gpu->rb[0];
@ -51,7 +106,7 @@ static bool a3xx_me_init(struct msm_gpu *gpu)
OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000000);
gpu->funcs->flush(gpu, ring);
adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
return a3xx_idle(gpu);
}
@ -423,16 +478,11 @@ static struct msm_gpu_state *a3xx_gpu_state_get(struct msm_gpu *gpu)
return state;
}
/* Register offset defines for A3XX */
static const unsigned int a3xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE),
REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_AXXX_CP_RB_RPTR_ADDR),
REG_ADRENO_SKIP(REG_ADRENO_CP_RB_RPTR_ADDR_HI),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_AXXX_CP_RB_RPTR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_AXXX_CP_RB_WPTR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_AXXX_CP_RB_CNTL),
};
static u32 a3xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
ring->memptrs->rptr = gpu_read(gpu, REG_AXXX_CP_RB_RPTR);
return ring->memptrs->rptr;
}
static const struct adreno_gpu_funcs funcs = {
.base = {
@ -441,8 +491,7 @@ static const struct adreno_gpu_funcs funcs = {
.pm_suspend = msm_gpu_pm_suspend,
.pm_resume = msm_gpu_pm_resume,
.recover = a3xx_recover,
.submit = adreno_submit,
.flush = adreno_flush,
.submit = a3xx_submit,
.active_ring = adreno_active_ring,
.irq = a3xx_irq,
.destroy = a3xx_destroy,
@ -452,6 +501,7 @@ static const struct adreno_gpu_funcs funcs = {
.gpu_state_get = a3xx_gpu_state_get,
.gpu_state_put = adreno_gpu_state_put,
.create_address_space = adreno_iommu_create_address_space,
.get_rptr = a3xx_get_rptr,
},
};
@ -490,7 +540,6 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs);
adreno_gpu->registers = a3xx_registers;
adreno_gpu->reg_offsets = a3xx_register_offsets;
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
if (ret)


@ -22,6 +22,54 @@ extern bool hang_debug;
static void a4xx_dump(struct msm_gpu *gpu);
static bool a4xx_idle(struct msm_gpu *gpu);
static void a4xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
struct msm_drm_private *priv = gpu->dev->dev_private;
struct msm_ringbuffer *ring = submit->ring;
unsigned int i;
for (i = 0; i < submit->nr_cmds; i++) {
switch (submit->cmd[i].type) {
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
/* ignore IB-targets */
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
/* ignore if there has not been a ctx switch: */
if (priv->lastctx == submit->queue->ctx)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFE, 2);
OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
OUT_RING(ring, submit->cmd[i].size);
OUT_PKT2(ring);
break;
}
}
OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
OUT_RING(ring, submit->seqno);
/* Flush HLSQ lazy updates to make sure there is nothing
* pending for indirect loads after the timestamp has
* passed:
*/
OUT_PKT3(ring, CP_EVENT_WRITE, 1);
OUT_RING(ring, HLSQ_FLUSH);
/* wait for idle before cache flush/interrupt */
OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
OUT_RING(ring, 0x00000000);
/* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
OUT_PKT3(ring, CP_EVENT_WRITE, 3);
OUT_RING(ring, CACHE_FLUSH_TS | BIT(31));
OUT_RING(ring, rbmemptr(ring, fence));
OUT_RING(ring, submit->seqno);
adreno_flush(gpu, ring, REG_A4XX_CP_RB_WPTR);
}
/*
* a4xx_enable_hwcg() - Program the clock control registers
* @device: The adreno device pointer
@ -129,7 +177,7 @@ static bool a4xx_me_init(struct msm_gpu *gpu)
OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000000);
gpu->funcs->flush(gpu, ring);
adreno_flush(gpu, ring, REG_A4XX_CP_RB_WPTR);
return a4xx_idle(gpu);
}
@ -515,17 +563,6 @@ static struct msm_gpu_state *a4xx_gpu_state_get(struct msm_gpu *gpu)
return state;
}
/* Register offset defines for A4XX, in order of enum adreno_regs */
static const unsigned int a4xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A4XX_CP_RB_BASE),
REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_A4XX_CP_RB_RPTR_ADDR),
REG_ADRENO_SKIP(REG_ADRENO_CP_RB_RPTR_ADDR_HI),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A4XX_CP_RB_RPTR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A4XX_CP_RB_WPTR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A4XX_CP_RB_CNTL),
};
static void a4xx_dump(struct msm_gpu *gpu)
{
printk("status: %08x\n",
@ -576,6 +613,12 @@ static int a4xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
return 0;
}
static u32 a4xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
ring->memptrs->rptr = gpu_read(gpu, REG_A4XX_CP_RB_RPTR);
return ring->memptrs->rptr;
}
static const struct adreno_gpu_funcs funcs = {
.base = {
.get_param = adreno_get_param,
@ -583,8 +626,7 @@ static const struct adreno_gpu_funcs funcs = {
.pm_suspend = a4xx_pm_suspend,
.pm_resume = a4xx_pm_resume,
.recover = a4xx_recover,
.submit = adreno_submit,
.flush = adreno_flush,
.submit = a4xx_submit,
.active_ring = adreno_active_ring,
.irq = a4xx_irq,
.destroy = a4xx_destroy,
@ -594,6 +636,7 @@ static const struct adreno_gpu_funcs funcs = {
.gpu_state_get = a4xx_gpu_state_get,
.gpu_state_put = adreno_gpu_state_put,
.create_address_space = adreno_iommu_create_address_space,
.get_rptr = a4xx_get_rptr,
},
.get_timestamp = a4xx_get_timestamp,
};
@ -631,15 +674,12 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
adreno_gpu->registers = adreno_is_a405(adreno_gpu) ? a405_registers :
a4xx_registers;
adreno_gpu->reg_offsets = a4xx_register_offsets;
/* if needed, allocate gmem: */
if (adreno_is_a4xx(adreno_gpu)) {
ret = adreno_gpu_ocmem_init(dev->dev, adreno_gpu,
&a4xx_gpu->ocmem);
if (ret)
goto fail;
}
ret = adreno_gpu_ocmem_init(dev->dev, adreno_gpu,
&a4xx_gpu->ocmem);
if (ret)
goto fail;
if (!gpu->aspace) {
/* TODO we think it is possible to configure the GPU to


@ -11,7 +11,7 @@
#include "a5xx_gpu.h"
static int pfp_print(struct msm_gpu *gpu, struct drm_printer *p)
static void pfp_print(struct msm_gpu *gpu, struct drm_printer *p)
{
int i;
@ -22,11 +22,9 @@ static int pfp_print(struct msm_gpu *gpu, struct drm_printer *p)
drm_printf(p, " %02x: %08x\n", i,
gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA));
}
return 0;
}
static int me_print(struct msm_gpu *gpu, struct drm_printer *p)
static void me_print(struct msm_gpu *gpu, struct drm_printer *p)
{
int i;
@ -37,11 +35,9 @@ static int me_print(struct msm_gpu *gpu, struct drm_printer *p)
drm_printf(p, " %02x: %08x\n", i,
gpu_read(gpu, REG_A5XX_CP_ME_STAT_DATA));
}
return 0;
}
static int meq_print(struct msm_gpu *gpu, struct drm_printer *p)
static void meq_print(struct msm_gpu *gpu, struct drm_printer *p)
{
int i;
@ -52,11 +48,9 @@ static int meq_print(struct msm_gpu *gpu, struct drm_printer *p)
drm_printf(p, " %02x: %08x\n", i,
gpu_read(gpu, REG_A5XX_CP_MEQ_DBG_DATA));
}
return 0;
}
static int roq_print(struct msm_gpu *gpu, struct drm_printer *p)
static void roq_print(struct msm_gpu *gpu, struct drm_printer *p)
{
int i;
@ -71,8 +65,6 @@ static int roq_print(struct msm_gpu *gpu, struct drm_printer *p)
drm_printf(p, " %02x: %08x %08x %08x %08x\n", i,
val[0], val[1], val[2], val[3]);
}
return 0;
}
static int show(struct seq_file *m, void *arg)
@ -81,10 +73,11 @@ static int show(struct seq_file *m, void *arg)
struct drm_device *dev = node->minor->dev;
struct msm_drm_private *priv = dev->dev_private;
struct drm_printer p = drm_seq_file_printer(m);
int (*show)(struct msm_gpu *gpu, struct drm_printer *p) =
void (*show)(struct msm_gpu *gpu, struct drm_printer *p) =
node->info_ent->data;
return show(priv->gpu, &p);
show(priv->gpu, &p);
return 0;
}
#define ENT(n) { .name = #n, .show = show, .data = n ##_print }


@ -18,13 +18,24 @@ static void a5xx_dump(struct msm_gpu *gpu);
#define GPU_PAS_ID 13
static void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
bool sync)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
uint32_t wptr;
unsigned long flags;
/*
* Most flush operations need to issue a WHERE_AM_I opcode to sync up
* the rptr shadow
*/
if (a5xx_gpu->has_whereami && sync) {
OUT_PKT7(ring, CP_WHERE_AM_I, 2);
OUT_RING(ring, lower_32_bits(shadowptr(a5xx_gpu, ring)));
OUT_RING(ring, upper_32_bits(shadowptr(a5xx_gpu, ring)));
}
spin_lock_irqsave(&ring->lock, flags);
/* Copy the shadow to the actual register */
@ -43,8 +54,7 @@ static void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);
}
static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_file_private *ctx)
static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
struct msm_drm_private *priv = gpu->dev->dev_private;
struct msm_ringbuffer *ring = submit->ring;
@ -57,7 +67,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
if (priv->lastctx == ctx)
if (priv->lastctx == submit->queue->ctx)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
@ -91,7 +101,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit
}
}
a5xx_flush(gpu, ring);
a5xx_flush(gpu, ring, true);
a5xx_preempt_trigger(gpu);
/* we might not necessarily have a cmd from userspace to
@ -103,8 +113,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit
msm_gpu_retire(gpu);
}
static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_file_private *ctx)
static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
@ -114,7 +123,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
if (IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) && submit->in_rb) {
priv->lastctx = NULL;
a5xx_submit_in_rb(gpu, submit, ctx);
a5xx_submit_in_rb(gpu, submit);
return;
}
@ -148,7 +157,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
if (priv->lastctx == ctx)
if (priv->lastctx == submit->queue->ctx)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
@ -206,7 +215,8 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
/* Set bit 0 to trigger an interrupt on preempt complete */
OUT_RING(ring, 0x01);
a5xx_flush(gpu, ring);
/* A WHERE_AM_I packet is not needed after a YIELD */
a5xx_flush(gpu, ring, false);
/* Check to see if we need to start preemption */
a5xx_preempt_trigger(gpu);
@ -365,7 +375,7 @@ static int a5xx_me_init(struct msm_gpu *gpu)
OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000000);
gpu->funcs->flush(gpu, ring);
a5xx_flush(gpu, ring, true);
return a5xx_idle(gpu, ring) ? 0 : -EINVAL;
}
@ -407,11 +417,31 @@ static int a5xx_preempt_start(struct msm_gpu *gpu)
OUT_RING(ring, 0x01);
OUT_RING(ring, 0x01);
gpu->funcs->flush(gpu, ring);
/* The WHERE_AM_I packet is not needed after a YIELD is issued */
a5xx_flush(gpu, ring, false);
return a5xx_idle(gpu, ring) ? 0 : -EINVAL;
}
static void a5xx_ucode_check_version(struct a5xx_gpu *a5xx_gpu,
struct drm_gem_object *obj)
{
u32 *buf = msm_gem_get_vaddr_active(obj);
if (IS_ERR(buf))
return;
/*
* If the lowest nibble is 0xa that is an indication that this microcode
* has been patched. The actual version is in dword [3] but we only care
* about the patchlevel which is the lowest nibble of dword [3]
*/
if (((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1)
a5xx_gpu->has_whereami = true;
msm_gem_put_vaddr(obj);
}
static int a5xx_ucode_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@ -447,6 +477,7 @@ static int a5xx_ucode_init(struct msm_gpu *gpu)
}
msm_gem_object_set_name(a5xx_gpu->pfp_bo, "pfpfw");
a5xx_ucode_check_version(a5xx_gpu, a5xx_gpu->pfp_bo);
}
gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO,
@ -506,6 +537,7 @@ static int a5xx_zap_shader_init(struct msm_gpu *gpu)
static int a5xx_hw_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
int ret;
gpu_write(gpu, REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
@ -714,9 +746,36 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
gpu_write64(gpu, REG_A5XX_CP_RB_BASE, REG_A5XX_CP_RB_BASE_HI,
gpu->rb[0]->iova);
/*
* If the microcode supports the WHERE_AM_I opcode then we can use that
* in lieu of the RPTR shadow and enable preemption. Otherwise, we
* can't safely use the RPTR shadow or preemption. In either case, the
* RPTR shadow should be disabled in hardware.
*/
gpu_write(gpu, REG_A5XX_CP_RB_CNTL,
MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
/* Disable preemption if WHERE_AM_I isn't available */
if (!a5xx_gpu->has_whereami && gpu->nr_rings > 1) {
a5xx_preempt_fini(gpu);
gpu->nr_rings = 1;
} else {
/* Create a privileged buffer for the RPTR shadow */
if (!a5xx_gpu->shadow_bo) {
a5xx_gpu->shadow = msm_gem_kernel_new(gpu->dev,
sizeof(u32) * gpu->nr_rings,
MSM_BO_UNCACHED | MSM_BO_MAP_PRIV,
gpu->aspace, &a5xx_gpu->shadow_bo,
&a5xx_gpu->shadow_iova);
if (IS_ERR(a5xx_gpu->shadow))
return PTR_ERR(a5xx_gpu->shadow);
}
gpu_write64(gpu, REG_A5XX_CP_RB_RPTR_ADDR,
REG_A5XX_CP_RB_RPTR_ADDR_HI, shadowptr(a5xx_gpu, gpu->rb[0]));
}
a5xx_preempt_hw_init(gpu);
/* Disable the interrupts through the initial bringup stage */
@ -740,7 +799,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
OUT_PKT7(gpu->rb[0], CP_EVENT_WRITE, 1);
OUT_RING(gpu->rb[0], CP_EVENT_WRITE_0_EVENT(STAT_EVENT));
gpu->funcs->flush(gpu, gpu->rb[0]);
a5xx_flush(gpu, gpu->rb[0], true);
if (!a5xx_idle(gpu, gpu->rb[0]))
return -EINVAL;
}
@ -758,7 +817,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1);
OUT_RING(gpu->rb[0], 0x00000000);
gpu->funcs->flush(gpu, gpu->rb[0]);
a5xx_flush(gpu, gpu->rb[0], true);
if (!a5xx_idle(gpu, gpu->rb[0]))
return -EINVAL;
} else if (ret == -ENODEV) {
@ -825,6 +884,11 @@ static void a5xx_destroy(struct msm_gpu *gpu)
drm_gem_object_put(a5xx_gpu->gpmu_bo);
}
if (a5xx_gpu->shadow_bo) {
msm_gem_unpin_iova(a5xx_gpu->shadow_bo, gpu->aspace);
drm_gem_object_put(a5xx_gpu->shadow_bo);
}
adreno_gpu_cleanup(adreno_gpu);
kfree(a5xx_gpu);
}
@ -1057,17 +1121,6 @@ static irqreturn_t a5xx_irq(struct msm_gpu *gpu)
return IRQ_HANDLED;
}
static const u32 a5xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A5XX_CP_RB_BASE),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE_HI, REG_A5XX_CP_RB_BASE_HI),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_A5XX_CP_RB_RPTR_ADDR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR_HI,
REG_A5XX_CP_RB_RPTR_ADDR_HI),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A5XX_CP_RB_RPTR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A5XX_CP_RB_WPTR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A5XX_CP_RB_CNTL),
};
static const u32 a5xx_registers[] = {
0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B,
0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095,
@ -1432,6 +1485,17 @@ static unsigned long a5xx_gpu_busy(struct msm_gpu *gpu)
return (unsigned long)busy_time;
}
static uint32_t a5xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
if (a5xx_gpu->has_whereami)
return a5xx_gpu->shadow[ring->id];
return ring->memptrs->rptr = gpu_read(gpu, REG_A5XX_CP_RB_RPTR);
}
static const struct adreno_gpu_funcs funcs = {
.base = {
.get_param = adreno_get_param,
@ -1440,7 +1504,6 @@ static const struct adreno_gpu_funcs funcs = {
.pm_resume = a5xx_pm_resume,
.recover = a5xx_recover,
.submit = a5xx_submit,
.flush = a5xx_flush,
.active_ring = a5xx_active_ring,
.irq = a5xx_irq,
.destroy = a5xx_destroy,
@ -1454,6 +1517,7 @@ static const struct adreno_gpu_funcs funcs = {
.gpu_state_get = a5xx_gpu_state_get,
.gpu_state_put = a5xx_gpu_state_put,
.create_address_space = adreno_iommu_create_address_space,
.get_rptr = a5xx_get_rptr,
},
.get_timestamp = a5xx_get_timestamp,
};
@ -1512,14 +1576,12 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
gpu = &adreno_gpu->base;
adreno_gpu->registers = a5xx_registers;
adreno_gpu->reg_offsets = a5xx_register_offsets;
a5xx_gpu->lm_leakage = 0x4E001A;
check_speed_bin(&pdev->dev);
/* Restricting nr_rings to 1 to temporarily disable preemption */
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 4);
if (ret) {
a5xx_destroy(&(a5xx_gpu->base.base));
return ERR_PTR(ret);


@ -37,6 +37,13 @@ struct a5xx_gpu {
atomic_t preempt_state;
struct timer_list preempt_timer;
struct drm_gem_object *shadow_bo;
uint64_t shadow_iova;
uint32_t *shadow;
/* True if the microcode supports the WHERE_AM_I opcode */
bool has_whereami;
};
#define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base)
@ -141,6 +148,9 @@ static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs,
return -ETIMEDOUT;
}
#define shadowptr(a5xx_gpu, ring) ((a5xx_gpu)->shadow_iova + \
((ring)->id * sizeof(uint32_t)))
bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
void a5xx_set_hwcg(struct msm_gpu *gpu, bool state);
@ -150,6 +160,8 @@ void a5xx_preempt_trigger(struct msm_gpu *gpu);
void a5xx_preempt_irq(struct msm_gpu *gpu);
void a5xx_preempt_fini(struct msm_gpu *gpu);
void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, bool sync);
/* Return true if we are in a preempt state */
static inline bool a5xx_in_preempt(struct a5xx_gpu *a5xx_gpu)
{


@ -240,7 +240,7 @@ static int a5xx_gpmu_init(struct msm_gpu *gpu)
OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
OUT_RING(ring, 1);
gpu->funcs->flush(gpu, ring);
a5xx_flush(gpu, ring, true);
if (!a5xx_idle(gpu, ring)) {
DRM_ERROR("%s: Unable to load GPMU firmware. GPMU will not be active\n",


@ -259,8 +259,9 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
ptr->magic = A5XX_PREEMPT_RECORD_MAGIC;
ptr->info = 0;
ptr->data = 0;
ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT;
ptr->rptr_addr = rbmemptr(ring, rptr);
ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE;
ptr->rptr_addr = shadowptr(a5xx_gpu, ring);
ptr->counter = counters_iova;
return 0;


@ -11,6 +11,7 @@
#include "a6xx_gpu.h"
#include "a6xx_gmu.xml.h"
#include "msm_gem.h"
#include "msm_gpu_trace.h"
#include "msm_mmu.h"
static void a6xx_gmu_fault(struct a6xx_gmu *gmu)
@ -124,6 +125,8 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
gmu->current_perf_index = perf_index;
gmu->freq = gmu->gpu_freqs[perf_index];
trace_msm_gmu_freq_change(gmu->freq, perf_index);
/*
* This can get called from devfreq while the hardware is idle. Don't
* bring up the power if it isn't already active


@ -51,9 +51,20 @@ bool a6xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
uint32_t wptr;
unsigned long flags;
/* Expanded APRIV doesn't need to issue the WHERE_AM_I opcode */
if (a6xx_gpu->has_whereami && !adreno_gpu->base.hw_apriv) {
OUT_PKT7(ring, CP_WHERE_AM_I, 2);
OUT_RING(ring, lower_32_bits(shadowptr(a6xx_gpu, ring)));
OUT_RING(ring, upper_32_bits(shadowptr(a6xx_gpu, ring)));
}
spin_lock_irqsave(&ring->lock, flags);
/* Copy the shadow to the actual register */
@ -81,8 +92,50 @@ static void get_stats_counter(struct msm_ringbuffer *ring, u32 counter,
OUT_RING(ring, upper_32_bits(iova));
}
static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_file_private *ctx)
static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
struct msm_ringbuffer *ring, struct msm_file_private *ctx)
{
phys_addr_t ttbr;
u32 asid;
u64 memptr = rbmemptr(ring, ttbr0);
if (ctx == a6xx_gpu->cur_ctx)
return;
if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid))
return;
/* Execute the table update */
OUT_PKT7(ring, CP_SMMU_TABLE_UPDATE, 4);
OUT_RING(ring, CP_SMMU_TABLE_UPDATE_0_TTBR0_LO(lower_32_bits(ttbr)));
OUT_RING(ring,
CP_SMMU_TABLE_UPDATE_1_TTBR0_HI(upper_32_bits(ttbr)) |
CP_SMMU_TABLE_UPDATE_1_ASID(asid));
OUT_RING(ring, CP_SMMU_TABLE_UPDATE_2_CONTEXTIDR(0));
OUT_RING(ring, CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK(0));
/*
* Write the new TTBR0 to the memstore. This is good for debugging.
*/
OUT_PKT7(ring, CP_MEM_WRITE, 4);
OUT_RING(ring, CP_MEM_WRITE_0_ADDR_LO(lower_32_bits(memptr)));
OUT_RING(ring, CP_MEM_WRITE_1_ADDR_HI(upper_32_bits(memptr)));
OUT_RING(ring, lower_32_bits(ttbr));
OUT_RING(ring, (asid << 16) | upper_32_bits(ttbr));
/*
* And finally, trigger a uche flush to be sure there isn't anything
* lingering in that part of the GPU
*/
OUT_PKT7(ring, CP_EVENT_WRITE, 1);
OUT_RING(ring, 0x31);
a6xx_gpu->cur_ctx = ctx;
}
static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
struct msm_drm_private *priv = gpu->dev->dev_private;
@ -91,6 +144,8 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_ringbuffer *ring = submit->ring;
unsigned int i;
a6xx_set_pagetable(a6xx_gpu, ring, submit->queue->ctx);
get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
rbmemptr_stats(ring, index, cpcycles_start));
@ -115,7 +170,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
if (priv->lastctx == ctx)
if (priv->lastctx == submit->queue->ctx)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
@ -464,6 +519,30 @@ static int a6xx_cp_init(struct msm_gpu *gpu)
return a6xx_idle(gpu, ring) ? 0 : -EINVAL;
}
static void a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
struct drm_gem_object *obj)
{
u32 *buf = msm_gem_get_vaddr_active(obj);
if (IS_ERR(buf))
return;
/*
* If the lowest nibble is 0xa that is an indication that this microcode
* has been patched. The actual version is in dword [3] but we only care
* about the patchlevel which is the lowest nibble of dword [3]
*
* Otherwise check that the firmware is greater than or equal to 1.90
* which was the first version that had this fix built in
*/
if (((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1)
a6xx_gpu->has_whereami = true;
else if ((buf[0] & 0xfff) > 0x190)
a6xx_gpu->has_whereami = true;
msm_gem_put_vaddr(obj);
}
static int a6xx_ucode_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@ -484,6 +563,7 @@ static int a6xx_ucode_init(struct msm_gpu *gpu)
}
msm_gem_object_set_name(a6xx_gpu->sqe_bo, "sqefw");
a6xx_ucode_check_version(a6xx_gpu, a6xx_gpu->sqe_bo);
}
gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE_LO,
@ -699,12 +779,43 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
gpu_write64(gpu, REG_A6XX_CP_RB_BASE, REG_A6XX_CP_RB_BASE_HI,
gpu->rb[0]->iova);
gpu_write(gpu, REG_A6XX_CP_RB_CNTL,
MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
/* Targets that support extended APRIV can use the RPTR shadow from
* hardware but all the other ones need to disable the feature. Targets
* that support the WHERE_AM_I opcode can use that instead
*/
if (adreno_gpu->base.hw_apriv)
gpu_write(gpu, REG_A6XX_CP_RB_CNTL, MSM_GPU_RB_CNTL_DEFAULT);
else
gpu_write(gpu, REG_A6XX_CP_RB_CNTL,
MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
/*
* Expanded APRIV and targets that support WHERE_AM_I both need a
* privileged buffer to store the RPTR shadow
*/
if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami) {
if (!a6xx_gpu->shadow_bo) {
a6xx_gpu->shadow = msm_gem_kernel_new_locked(gpu->dev,
sizeof(u32) * gpu->nr_rings,
MSM_BO_UNCACHED | MSM_BO_MAP_PRIV,
gpu->aspace, &a6xx_gpu->shadow_bo,
&a6xx_gpu->shadow_iova);
if (IS_ERR(a6xx_gpu->shadow))
return PTR_ERR(a6xx_gpu->shadow);
}
gpu_write64(gpu, REG_A6XX_CP_RB_RPTR_ADDR_LO,
REG_A6XX_CP_RB_RPTR_ADDR_HI,
shadowptr(a6xx_gpu, gpu->rb[0]));
}
/* Always come up on rb 0 */
a6xx_gpu->cur_ring = gpu->rb[0];
a6xx_gpu->cur_ctx = NULL;
/* Enable the SQE to start the CP engine */
gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1);
@ -911,18 +1022,6 @@ static irqreturn_t a6xx_irq(struct msm_gpu *gpu)
return IRQ_HANDLED;
}
static const u32 a6xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A6XX_CP_RB_BASE),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE_HI, REG_A6XX_CP_RB_BASE_HI),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR,
REG_A6XX_CP_RB_RPTR_ADDR_LO),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR_HI,
REG_A6XX_CP_RB_RPTR_ADDR_HI),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A6XX_CP_RB_RPTR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A6XX_CP_RB_WPTR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A6XX_CP_RB_CNTL),
};
static int a6xx_pm_resume(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@ -931,6 +1030,8 @@ static int a6xx_pm_resume(struct msm_gpu *gpu)
gpu->needs_hw_init = true;
trace_msm_gpu_resume(0);
ret = a6xx_gmu_resume(a6xx_gpu);
if (ret)
return ret;
@ -945,6 +1046,8 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
trace_msm_gpu_suspend(0);
devfreq_suspend_device(gpu->devfreq.devfreq);
return a6xx_gmu_stop(a6xx_gpu);
@ -983,6 +1086,11 @@ static void a6xx_destroy(struct msm_gpu *gpu)
drm_gem_object_put(a6xx_gpu->sqe_bo);
}
if (a6xx_gpu->shadow_bo) {
msm_gem_unpin_iova(a6xx_gpu->shadow_bo, gpu->aspace);
drm_gem_object_put(a6xx_gpu->shadow_bo);
}
a6xx_gmu_remove(a6xx_gpu);
adreno_gpu_cleanup(adreno_gpu);
@ -1017,6 +1125,31 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu)
return (unsigned long)busy_time;
}
static struct msm_gem_address_space *
a6xx_create_private_address_space(struct msm_gpu *gpu)
{
struct msm_mmu *mmu;
mmu = msm_iommu_pagetable_create(gpu->aspace->mmu);
if (IS_ERR(mmu))
return ERR_CAST(mmu);
return msm_gem_address_space_create(mmu,
"gpu", 0x100000000ULL, 0x1ffffffffULL);
}
static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami)
return a6xx_gpu->shadow[ring->id];
return ring->memptrs->rptr = gpu_read(gpu, REG_A6XX_CP_RB_RPTR);
}
static const struct adreno_gpu_funcs funcs = {
.base = {
.get_param = adreno_get_param,
@ -1025,7 +1158,6 @@ static const struct adreno_gpu_funcs funcs = {
.pm_resume = a6xx_pm_resume,
.recover = a6xx_recover,
.submit = a6xx_submit,
.flush = a6xx_flush,
.active_ring = a6xx_active_ring,
.irq = a6xx_irq,
.destroy = a6xx_destroy,
@ -1040,6 +1172,8 @@ static const struct adreno_gpu_funcs funcs = {
.gpu_state_put = a6xx_gpu_state_put,
#endif
.create_address_space = adreno_iommu_create_address_space,
.create_private_address_space = a6xx_create_private_address_space,
.get_rptr = a6xx_get_rptr,
},
.get_timestamp = a6xx_get_timestamp,
};
@ -1048,6 +1182,8 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = priv->gpu_pdev;
struct adreno_platform_config *config = pdev->dev.platform_data;
const struct adreno_info *info;
struct device_node *node;
struct a6xx_gpu *a6xx_gpu;
struct adreno_gpu *adreno_gpu;
@ -1062,9 +1198,15 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
gpu = &adreno_gpu->base;
adreno_gpu->registers = NULL;
adreno_gpu->reg_offsets = a6xx_register_offsets;
if (adreno_is_a650(adreno_gpu))
/*
* We need to know the platform type before calling into adreno_gpu_init
* so that the hw_apriv flag can be correctly set. Snoop into the info
* and grab the revision number
*/
info = adreno_info(config->rev);
if (info && info->revn == 650)
adreno_gpu->base.hw_apriv = true;
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);

View File

@ -19,8 +19,15 @@ struct a6xx_gpu {
uint64_t sqe_iova;
struct msm_ringbuffer *cur_ring;
struct msm_file_private *cur_ctx;
struct a6xx_gmu gmu;
struct drm_gem_object *shadow_bo;
uint64_t shadow_iova;
uint32_t *shadow;
bool has_whereami;
};
#define to_a6xx_gpu(x) container_of(x, struct a6xx_gpu, base)
@ -50,6 +57,9 @@ static inline bool a6xx_has_gbif(struct adreno_gpu *gpu)
return true;
}
#define shadowptr(_a6xx_gpu, _ring) ((_a6xx_gpu)->shadow_iova + \
((_ring)->id * sizeof(uint32_t)))
int a6xx_gmu_resume(struct a6xx_gpu *gpu);
int a6xx_gmu_stop(struct a6xx_gpu *gpu);


@ -875,7 +875,7 @@ static void a6xx_get_indexed_registers(struct msm_gpu *gpu,
int i;
a6xx_state->indexed_regs = state_kcalloc(a6xx_state, count,
sizeof(a6xx_state->indexed_regs));
sizeof(*a6xx_state->indexed_regs));
if (!a6xx_state->indexed_regs)
return;


@ -282,7 +282,7 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
int ret;
if (pdev)
gpu = platform_get_drvdata(pdev);
gpu = dev_to_gpu(&pdev->dev);
if (!gpu) {
dev_err_once(dev->dev, "no GPU device was found\n");
@ -417,15 +417,13 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
return PTR_ERR(gpu);
}
dev_set_drvdata(dev, gpu);
return 0;
}
static void adreno_unbind(struct device *dev, struct device *master,
void *data)
{
struct msm_gpu *gpu = dev_get_drvdata(dev);
struct msm_gpu *gpu = dev_to_gpu(dev);
pm_runtime_force_suspend(dev);
gpu->funcs->destroy(gpu);
@ -490,16 +488,14 @@ static const struct of_device_id dt_match[] = {
#ifdef CONFIG_PM
static int adreno_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct msm_gpu *gpu = platform_get_drvdata(pdev);
struct msm_gpu *gpu = dev_to_gpu(dev);
return gpu->funcs->pm_resume(gpu);
}
static int adreno_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct msm_gpu *gpu = platform_get_drvdata(pdev);
struct msm_gpu *gpu = dev_to_gpu(dev);
return gpu->funcs->pm_suspend(gpu);
}


@ -189,12 +189,27 @@ struct msm_gem_address_space *
adreno_iommu_create_address_space(struct msm_gpu *gpu,
struct platform_device *pdev)
{
struct iommu_domain *iommu = iommu_domain_alloc(&platform_bus_type);
struct msm_mmu *mmu = msm_iommu_new(&pdev->dev, iommu);
struct iommu_domain *iommu;
struct msm_mmu *mmu;
struct msm_gem_address_space *aspace;
u64 start, size;
aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M,
0xffffffff - SZ_16M);
iommu = iommu_domain_alloc(&platform_bus_type);
if (!iommu)
return NULL;
mmu = msm_iommu_new(&pdev->dev, iommu);
/*
* Use the aperture start or SZ_16M, whichever is greater. This will
* ensure that we align with the allocated pagetable range while still
* allowing room in the lower 32 bits for GMEM and whatnot
*/
start = max_t(u64, SZ_16M, iommu->geometry.aperture_start);
size = iommu->geometry.aperture_end - start + 1;
aspace = msm_gem_address_space_create(mmu, "gpu",
start & GENMASK(48, 0), size);
if (IS_ERR(aspace) && !IS_ERR(mmu))
mmu->funcs->destroy(mmu);
@ -407,8 +422,9 @@ int adreno_hw_init(struct msm_gpu *gpu)
static uint32_t get_rptr(struct adreno_gpu *adreno_gpu,
struct msm_ringbuffer *ring)
{
return ring->memptrs->rptr = adreno_gpu_read(
adreno_gpu, REG_ADRENO_CP_RB_RPTR);
struct msm_gpu *gpu = &adreno_gpu->base;
return gpu->funcs->get_rptr(gpu, ring);
}
struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu)
@ -434,81 +450,8 @@ void adreno_recover(struct msm_gpu *gpu)
}
}
void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_file_private *ctx)
void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, u32 reg)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct msm_drm_private *priv = gpu->dev->dev_private;
struct msm_ringbuffer *ring = submit->ring;
unsigned i;
for (i = 0; i < submit->nr_cmds; i++) {
switch (submit->cmd[i].type) {
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
/* ignore IB-targets */
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
/* ignore if there has not been a ctx switch: */
if (priv->lastctx == ctx)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
OUT_PKT3(ring, adreno_is_a4xx(adreno_gpu) ?
CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
OUT_RING(ring, submit->cmd[i].size);
OUT_PKT2(ring);
break;
}
}
OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
OUT_RING(ring, submit->seqno);
if (adreno_is_a3xx(adreno_gpu) || adreno_is_a4xx(adreno_gpu)) {
/* Flush HLSQ lazy updates to make sure there is nothing
* pending for indirect loads after the timestamp has
* passed:
*/
OUT_PKT3(ring, CP_EVENT_WRITE, 1);
OUT_RING(ring, HLSQ_FLUSH);
}
/* wait for idle before cache flush/interrupt */
OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
OUT_RING(ring, 0x00000000);
if (!adreno_is_a2xx(adreno_gpu)) {
/* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
OUT_PKT3(ring, CP_EVENT_WRITE, 3);
OUT_RING(ring, CACHE_FLUSH_TS | BIT(31));
OUT_RING(ring, rbmemptr(ring, fence));
OUT_RING(ring, submit->seqno);
} else {
/* BIT(31) means something else on a2xx */
OUT_PKT3(ring, CP_EVENT_WRITE, 3);
OUT_RING(ring, CACHE_FLUSH_TS);
OUT_RING(ring, rbmemptr(ring, fence));
OUT_RING(ring, submit->seqno);
OUT_PKT3(ring, CP_INTERRUPT, 1);
OUT_RING(ring, 0x80000000);
}
#if 0
if (adreno_is_a3xx(adreno_gpu)) {
/* Dummy set-constant to trigger context rollover */
OUT_PKT3(ring, CP_SET_CONSTANT, 2);
OUT_RING(ring, CP_REG(REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG));
OUT_RING(ring, 0x00000000);
}
#endif
gpu->funcs->flush(gpu, ring);
}
void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
uint32_t wptr;
/* Copy the shadow to the actual register */
@ -524,7 +467,7 @@ void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
/* ensure writes to ringbuffer have hit system memory: */
mb();
adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_WPTR, wptr);
gpu_write(gpu, reg, wptr);
}
bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)


@ -17,29 +17,8 @@
#include "adreno_common.xml.h"
#include "adreno_pm4.xml.h"
#define REG_ADRENO_DEFINE(_offset, _reg) [_offset] = (_reg) + 1
#define REG_SKIP ~0
#define REG_ADRENO_SKIP(_offset) [_offset] = REG_SKIP
extern bool snapshot_debugbus;
/**
* adreno_regs: List of registers that are used in across all
* 3D devices. Each device type has different offset value for the same
* register, so an array of register offsets are declared for every device
* and are indexed by the enumeration values defined in this enum
*/
enum adreno_regs {
REG_ADRENO_CP_RB_BASE,
REG_ADRENO_CP_RB_BASE_HI,
REG_ADRENO_CP_RB_RPTR_ADDR,
REG_ADRENO_CP_RB_RPTR_ADDR_HI,
REG_ADRENO_CP_RB_RPTR,
REG_ADRENO_CP_RB_WPTR,
REG_ADRENO_CP_RB_CNTL,
REG_ADRENO_REGISTER_MAX,
};
enum {
ADRENO_FW_PM4 = 0,
ADRENO_FW_SQE = 0, /* a6xx */
@ -176,11 +155,6 @@ static inline bool adreno_is_a225(struct adreno_gpu *gpu)
return gpu->revn == 225;
}
static inline bool adreno_is_a3xx(struct adreno_gpu *gpu)
{
return (gpu->revn >= 300) && (gpu->revn < 400);
}
static inline bool adreno_is_a305(struct adreno_gpu *gpu)
{
return gpu->revn == 305;
@ -207,11 +181,6 @@ static inline bool adreno_is_a330v2(struct adreno_gpu *gpu)
return adreno_is_a330(gpu) && (gpu->rev.patchid > 0);
}
static inline bool adreno_is_a4xx(struct adreno_gpu *gpu)
{
return (gpu->revn >= 400) && (gpu->revn < 500);
}
static inline int adreno_is_a405(struct adreno_gpu *gpu)
{
return gpu->revn == 405;
@ -269,9 +238,7 @@ struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu,
const struct firmware *fw, u64 *iova);
int adreno_hw_init(struct msm_gpu *gpu);
void adreno_recover(struct msm_gpu *gpu);
void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_file_private *ctx);
void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, u32 reg);
bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
@ -365,59 +332,12 @@ OUT_PKT7(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
((opcode & 0x7F) << 16) | (PM4_PARITY(opcode) << 23));
}
/*
* adreno_reg_check() - Checks the validity of a register enum
* @gpu: Pointer to struct adreno_gpu
* @offset_name: The register enum that is checked
*/
static inline bool adreno_reg_check(struct adreno_gpu *gpu,
enum adreno_regs offset_name)
{
BUG_ON(offset_name >= REG_ADRENO_REGISTER_MAX || !gpu->reg_offsets[offset_name]);
/*
* REG_SKIP is a special value that tell us that the register in
* question isn't implemented on target but don't trigger a BUG(). This
* is used to cleanly implement adreno_gpu_write64() and
* adreno_gpu_read64() in a generic fashion
*/
if (gpu->reg_offsets[offset_name] == REG_SKIP)
return false;
return true;
}
static inline u32 adreno_gpu_read(struct adreno_gpu *gpu,
enum adreno_regs offset_name)
{
u32 reg = gpu->reg_offsets[offset_name];
u32 val = 0;
if(adreno_reg_check(gpu,offset_name))
val = gpu_read(&gpu->base, reg - 1);
return val;
}
static inline void adreno_gpu_write(struct adreno_gpu *gpu,
enum adreno_regs offset_name, u32 data)
{
u32 reg = gpu->reg_offsets[offset_name];
if(adreno_reg_check(gpu, offset_name))
gpu_write(&gpu->base, reg - 1, data);
}
struct msm_gpu *a2xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a5xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a6xx_gpu_init(struct drm_device *dev);
static inline void adreno_gpu_write64(struct adreno_gpu *gpu,
enum adreno_regs lo, enum adreno_regs hi, u64 data)
{
adreno_gpu_write(gpu, lo, lower_32_bits(data));
adreno_gpu_write(gpu, hi, upper_32_bits(data));
}
static inline uint32_t get_wptr(struct msm_ringbuffer *ring)
{
return (ring->cur - ring->start) % (MSM_GPU_RINGBUFFER_SZ >> 2);


@ -298,6 +298,7 @@ enum adreno_pm4_type3_packets {
CP_SET_BIN_DATA5_OFFSET = 46,
CP_SET_CTXSWITCH_IB = 85,
CP_REG_WRITE = 109,
CP_WHERE_AM_I = 98,
};
enum adreno_state_block {
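The new opcode is what the a5xx/a6xx flush paths above emit to keep their privileged rptr shadow current; in sketch form (copied from the a5xx hunk, the a6xx path is analogous):

        /* Ask the CP to write its current ringbuffer read pointer into
         * the per-ring slot of the privileged shadow buffer; get_rptr()
         * then reads that slot instead of polling a GPU register: */
        OUT_PKT7(ring, CP_WHERE_AM_I, 2);
        OUT_RING(ring, lower_32_bits(shadowptr(a5xx_gpu, ring)));
        OUT_RING(ring, upper_32_bits(shadowptr(a5xx_gpu, ring)));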


@ -288,19 +288,6 @@ static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms)
}
#ifdef CONFIG_DEBUG_FS
#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix) \
static int __prefix ## _open(struct inode *inode, struct file *file) \
{ \
return single_open(file, __prefix ## _show, inode->i_private); \
} \
static const struct file_operations __prefix ## _fops = { \
.owner = THIS_MODULE, \
.open = __prefix ## _open, \
.release = single_release, \
.read = seq_read, \
.llseek = seq_lseek, \
}
static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
{
struct dpu_irq *irq_obj = s->private;
@ -328,7 +315,7 @@ static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
return 0;
}
DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_core_irq);
DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_core_irq);
void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
struct dentry *parent)


@ -30,6 +30,74 @@ enum dpu_perf_mode {
DPU_PERF_MODE_MAX
};
/**
* _dpu_core_perf_calc_bw() - to calculate BW per crtc
* @kms: pointer to the dpu_kms
* @crtc: pointer to a crtc
* Return: returns aggregated BW for all planes in crtc.
*/
static u64 _dpu_core_perf_calc_bw(struct dpu_kms *kms,
struct drm_crtc *crtc)
{
struct drm_plane *plane;
struct dpu_plane_state *pstate;
u64 crtc_plane_bw = 0;
u32 bw_factor;
drm_atomic_crtc_for_each_plane(plane, crtc) {
pstate = to_dpu_plane_state(plane->state);
if (!pstate)
continue;
crtc_plane_bw += pstate->plane_fetch_bw;
}
bw_factor = kms->catalog->perf.bw_inefficiency_factor;
if (bw_factor) {
crtc_plane_bw *= bw_factor;
do_div(crtc_plane_bw, 100);
}
return crtc_plane_bw;
}
/**
* _dpu_core_perf_calc_clk() - to calculate clock per crtc
* @kms: pointer to the dpu_kms
* @crtc: pointer to a crtc
* @state: pointer to a crtc state
* Return: returns max clk for all planes in crtc.
*/
static u64 _dpu_core_perf_calc_clk(struct dpu_kms *kms,
struct drm_crtc *crtc, struct drm_crtc_state *state)
{
struct drm_plane *plane;
struct dpu_plane_state *pstate;
struct drm_display_mode *mode;
u64 crtc_clk;
u32 clk_factor;
mode = &state->adjusted_mode;
crtc_clk = mode->vtotal * mode->hdisplay * drm_mode_vrefresh(mode);
drm_atomic_crtc_for_each_plane(plane, crtc) {
pstate = to_dpu_plane_state(plane->state);
if (!pstate)
continue;
crtc_clk = max(pstate->plane_clk, crtc_clk);
}
clk_factor = kms->catalog->perf.clk_inefficiency_factor;
if (clk_factor) {
crtc_clk *= clk_factor;
do_div(crtc_clk, 100);
}
return crtc_clk;
}
static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
{
struct msm_drm_private *priv;
@ -52,12 +120,7 @@ static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
dpu_cstate = to_dpu_crtc_state(state);
memset(perf, 0, sizeof(struct dpu_core_perf_params));
if (!dpu_cstate->bw_control) {
perf->bw_ctl = kms->catalog->perf.max_bw_high *
1000ULL;
perf->max_per_pipe_ib = perf->bw_ctl;
perf->core_clk_rate = kms->perf.max_core_clk_rate;
} else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_MINIMUM) {
if (kms->perf.perf_tune.mode == DPU_PERF_MODE_MINIMUM) {
perf->bw_ctl = 0;
perf->max_per_pipe_ib = 0;
perf->core_clk_rate = 0;
@ -65,6 +128,10 @@ static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
perf->bw_ctl = kms->perf.fix_core_ab_vote;
perf->max_per_pipe_ib = kms->perf.fix_core_ib_vote;
perf->core_clk_rate = kms->perf.fix_core_clk_rate;
} else {
perf->bw_ctl = _dpu_core_perf_calc_bw(kms, crtc);
perf->max_per_pipe_ib = kms->catalog->perf.min_dram_ib;
perf->core_clk_rate = _dpu_core_perf_calc_clk(kms, crtc, state);
}
DPU_DEBUG(
@ -116,11 +183,7 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
DPU_DEBUG("crtc:%d bw:%llu ctrl:%d\n",
tmp_crtc->base.id, tmp_cstate->new_perf.bw_ctl,
tmp_cstate->bw_control);
/*
* For bw check only use the bw if the
* atomic property has been already set
*/
if (tmp_cstate->bw_control)
bw_sum_of_intfs += tmp_cstate->new_perf.bw_ctl;
}
@ -132,9 +195,7 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
DPU_DEBUG("final threshold bw limit = %d\n", threshold);
if (!dpu_cstate->bw_control) {
DPU_DEBUG("bypass bandwidth check\n");
} else if (!threshold) {
if (!threshold) {
DPU_ERROR("no bandwidth limits specified\n");
return -E2BIG;
} else if (bw > threshold) {
@ -155,7 +216,11 @@ static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
= dpu_crtc_get_client_type(crtc);
struct drm_crtc *tmp_crtc;
struct dpu_crtc_state *dpu_cstate;
int ret = 0;
int i, ret = 0;
u64 avg_bw;
if (!kms->num_paths)
return -EINVAL;
drm_for_each_crtc(tmp_crtc, crtc->dev) {
if (tmp_crtc->enabled &&
@ -166,10 +231,20 @@ static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
perf.max_per_pipe_ib = max(perf.max_per_pipe_ib,
dpu_cstate->new_perf.max_per_pipe_ib);
DPU_DEBUG("crtc=%d bw=%llu\n", tmp_crtc->base.id,
dpu_cstate->new_perf.bw_ctl);
perf.bw_ctl += dpu_cstate->new_perf.bw_ctl;
DPU_DEBUG("crtc=%d bw=%llu paths:%d\n",
tmp_crtc->base.id,
dpu_cstate->new_perf.bw_ctl, kms->num_paths);
}
}
avg_bw = perf.bw_ctl;
do_div(avg_bw, (kms->num_paths * 1000)); /*Bps_to_icc*/
for (i = 0; i < kms->num_paths; i++)
icc_set_bw(kms->path[i], avg_bw, perf.max_per_pipe_ib);
return ret;
}
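To make the new interconnect vote concrete (numbers invented for the example): if the enabled CRTCs sum to perf.bw_ctl = 1,600,000,000 bytes/s and kms->num_paths = 2, then avg_bw = 1600000000 / (2 * 1000) = 800,000 kBps is voted on each path; the divide-by-1000 is the Bps-to-kBps conversion the /*Bps_to_icc*/ comment refers to, since icc_set_bw() takes its average and peak (here perf.max_per_pipe_ib) bandwidth arguments in kBps.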


@ -265,11 +265,6 @@ enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
{
struct drm_encoder *encoder;
if (!crtc) {
DPU_ERROR("invalid crtc\n");
return INTF_MODE_NONE;
}
/*
* TODO: This function is called from dpu debugfs and as part of atomic
* check. When called from debugfs, the crtc->mutex must be held to
@ -297,7 +292,6 @@ void dpu_crtc_vblank_callback(struct drm_crtc *crtc)
dpu_crtc->vblank_cb_time = ktime_get();
else
dpu_crtc->vblank_cb_count++;
_dpu_crtc_complete_flip(crtc);
drm_crtc_handle_vblank(crtc);
trace_dpu_crtc_vblank_cb(DRMID(crtc));
}
@ -402,6 +396,7 @@ static void dpu_crtc_frame_event_cb(void *data, u32 event)
void dpu_crtc_complete_commit(struct drm_crtc *crtc)
{
trace_dpu_crtc_complete_commit(DRMID(crtc));
_dpu_crtc_complete_flip(crtc);
}
static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
@ -421,8 +416,6 @@ static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
}
drm_mode_debug_printmodeline(adj_mode);
}
static void _dpu_crtc_get_pcc_coeff(struct drm_crtc_state *state,
@ -457,7 +450,6 @@ static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc)
struct dpu_crtc_mixer *mixer = cstate->mixers;
struct dpu_hw_pcc_cfg cfg;
struct dpu_hw_ctl *ctl;
struct dpu_hw_mixer *lm;
struct dpu_hw_dspp *dspp;
int i;
@ -467,7 +459,6 @@ static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc)
for (i = 0; i < cstate->num_mixers; i++) {
ctl = mixer[i].lm_ctl;
lm = mixer[i].hw_lm;
dspp = mixer[i].hw_dspp;
if (!dspp || !dspp->ops.setup_pcc)
@ -496,16 +487,8 @@ static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc)
static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
struct dpu_crtc *dpu_crtc;
struct dpu_crtc_state *cstate;
struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
struct drm_encoder *encoder;
struct drm_device *dev;
unsigned long flags;
if (!crtc) {
DPU_ERROR("invalid crtc\n");
return;
}
if (!crtc->state->enable) {
DPU_DEBUG("crtc%d -> enable %d, skip atomic_begin\n",
@ -515,21 +498,8 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
DPU_DEBUG("crtc%d\n", crtc->base.id);
dpu_crtc = to_dpu_crtc(crtc);
cstate = to_dpu_crtc_state(crtc->state);
dev = crtc->dev;
_dpu_crtc_setup_lm_bounds(crtc, crtc->state);
if (dpu_crtc->event) {
WARN_ON(dpu_crtc->event);
} else {
spin_lock_irqsave(&dev->event_lock, flags);
dpu_crtc->event = crtc->state->event;
crtc->state->event = NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);
}
/* encoder will trigger pending mask now */
drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
dpu_encoder_trigger_kickoff_pending(encoder);
@ -583,14 +553,11 @@ static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
return;
}
if (dpu_crtc->event) {
DPU_DEBUG("already received dpu_crtc->event\n");
} else {
spin_lock_irqsave(&dev->event_lock, flags);
dpu_crtc->event = crtc->state->event;
crtc->state->event = NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);
}
WARN_ON(dpu_crtc->event);
spin_lock_irqsave(&dev->event_lock, flags);
dpu_crtc->event = crtc->state->event;
crtc->state->event = NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);
/*
* If no mixers has been allocated in dpu_crtc_atomic_check(),
@ -635,14 +602,7 @@ static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
struct dpu_crtc_state *cstate;
if (!crtc || !state) {
DPU_ERROR("invalid argument(s)\n");
return;
}
cstate = to_dpu_crtc_state(state);
struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
DPU_DEBUG("crtc%d\n", crtc->base.id);
@ -731,14 +691,8 @@ static void dpu_crtc_reset(struct drm_crtc *crtc)
*/
static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
{
struct dpu_crtc_state *cstate, *old_cstate;
struct dpu_crtc_state *cstate, *old_cstate = to_dpu_crtc_state(crtc->state);
if (!crtc || !crtc->state) {
DPU_ERROR("invalid argument(s)\n");
return NULL;
}
old_cstate = to_dpu_crtc_state(crtc->state);
cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL);
if (!cstate) {
DPU_ERROR("failed to allocate state\n");
@ -754,19 +708,12 @@ static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
static void dpu_crtc_disable(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
struct dpu_crtc *dpu_crtc;
struct dpu_crtc_state *cstate;
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
struct drm_encoder *encoder;
unsigned long flags;
bool release_bandwidth = false;
if (!crtc || !crtc->state) {
DPU_ERROR("invalid crtc\n");
return;
}
dpu_crtc = to_dpu_crtc(crtc);
cstate = to_dpu_crtc_state(crtc->state);
DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
/* Disable/save vblank irq handling */
@ -825,19 +772,13 @@ static void dpu_crtc_disable(struct drm_crtc *crtc,
static void dpu_crtc_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
struct dpu_crtc *dpu_crtc;
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
struct drm_encoder *encoder;
bool request_bandwidth = false;
if (!crtc) {
DPU_ERROR("invalid crtc\n");
return;
}
pm_runtime_get_sync(crtc->dev->dev);
DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
dpu_crtc = to_dpu_crtc(crtc);
drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) {
/* in video mode, we hold an extra bandwidth reference
@ -873,15 +814,15 @@ struct plane_state {
static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
struct dpu_crtc *dpu_crtc;
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
struct plane_state *pstates;
struct dpu_crtc_state *cstate;
const struct drm_plane_state *pstate;
struct drm_plane *plane;
struct drm_display_mode *mode;
int cnt = 0, rc = 0, mixer_width, i, z_pos;
int cnt = 0, rc = 0, mixer_width = 0, i, z_pos;
struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2];
int multirect_count = 0;
@ -889,16 +830,8 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
int left_zpos_cnt = 0, right_zpos_cnt = 0;
struct drm_rect crtc_rect = { 0 };
if (!crtc) {
DPU_ERROR("invalid crtc\n");
return -EINVAL;
}
pstates = kzalloc(sizeof(*pstates) * DPU_STAGE_MAX * 4, GFP_KERNEL);
dpu_crtc = to_dpu_crtc(crtc);
cstate = to_dpu_crtc_state(state);
if (!state->enable || !state->active) {
DPU_DEBUG("crtc%d -> enable %d, active %d, skip atomic_check\n",
crtc->base.id, state->enable, state->active);
@ -914,9 +847,11 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
memset(pipe_staged, 0, sizeof(pipe_staged));
mixer_width = mode->hdisplay / cstate->num_mixers;
if (cstate->num_mixers) {
mixer_width = mode->hdisplay / cstate->num_mixers;
_dpu_crtc_setup_lm_bounds(crtc, state);
_dpu_crtc_setup_lm_bounds(crtc, state);
}
crtc_rect.x2 = mode->hdisplay;
crtc_rect.y2 = mode->vdisplay;
@ -1242,23 +1177,7 @@ static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
return 0;
}
static int _dpu_debugfs_status_open(struct inode *inode, struct file *file)
{
return single_open(file, _dpu_debugfs_status_show, inode->i_private);
}
#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix) \
static int __prefix ## _open(struct inode *inode, struct file *file) \
{ \
return single_open(file, __prefix ## _show, inode->i_private); \
} \
static const struct file_operations __prefix ## _fops = { \
.owner = THIS_MODULE, \
.open = __prefix ## _open, \
.release = single_release, \
.read = seq_read, \
.llseek = seq_lseek, \
}
DEFINE_SHOW_ATTRIBUTE(_dpu_debugfs_status);
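For reference, DEFINE_SHOW_ATTRIBUTE() from include/linux/seq_file.h generates essentially the boilerplate deleted above, which is what makes the hand-rolled open helpers and fops tables redundant. Roughly (paraphrased, not part of this patch):
/*
 * static int name ## _open(struct inode *inode, struct file *file)
 * {
 *         return single_open(file, name ## _show, inode->i_private);
 * }
 * static const struct file_operations name ## _fops = {
 *         .owner   = THIS_MODULE,
 *         .open    = name ## _open,
 *         .read    = seq_read,
 *         .llseek  = seq_lseek,
 *         .release = single_release,
 * };
 */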
static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
{
@ -1275,25 +1194,18 @@ static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
return 0;
}
DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_crtc_debugfs_state);
DEFINE_SHOW_ATTRIBUTE(dpu_crtc_debugfs_state);
static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
static const struct file_operations debugfs_status_fops = {
.open = _dpu_debugfs_status_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name,
crtc->dev->primary->debugfs_root);
debugfs_create_file("status", 0400,
dpu_crtc->debugfs_root,
dpu_crtc, &debugfs_status_fops);
dpu_crtc, &_dpu_debugfs_status_fops);
debugfs_create_file("state", 0600,
dpu_crtc->debugfs_root,
&dpu_crtc->base,

View File

@ -1001,6 +1001,9 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
trace_dpu_enc_mode_set(DRMID(drm_enc));
if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS && priv->dp)
msm_dp_display_mode_set(priv->dp, drm_enc, mode, adj_mode);
list_for_each_entry(conn_iter, connector_list, head)
if (conn_iter->encoder == drm_enc)
conn = conn_iter;
@ -1109,6 +1112,13 @@ static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
return;
}
if (dpu_enc->disp_info.intf_type == DRM_MODE_CONNECTOR_DisplayPort &&
dpu_enc->cur_master->hw_mdptop &&
dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select)
dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select(
dpu_enc->cur_master->hw_mdptop);
_dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info);
if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI &&
@ -1146,6 +1156,7 @@ static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc = NULL;
int ret = 0;
struct msm_drm_private *priv;
struct drm_display_mode *cur_mode = NULL;
if (!drm_enc) {
@ -1156,6 +1167,7 @@ static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
mutex_lock(&dpu_enc->enc_lock);
cur_mode = &dpu_enc->base.crtc->state->adjusted_mode;
priv = drm_enc->dev->dev_private;
trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay,
cur_mode->vdisplay);
@ -1176,6 +1188,15 @@ static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
_dpu_encoder_virt_enable_helper(drm_enc);
if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS && priv->dp) {
ret = msm_dp_display_enable(priv->dp,
drm_enc);
if (ret) {
DPU_ERROR_ENC(dpu_enc, "dp display enable failed: %d\n",
ret);
goto out;
}
}
dpu_enc->enabled = true;
out:
@ -1211,6 +1232,11 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
/* wait for idle */
dpu_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);
if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS && priv->dp) {
if (msm_dp_display_pre_disable(priv->dp, drm_enc))
DPU_ERROR_ENC(dpu_enc, "dp display push idle failed\n");
}
dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_PRE_STOP);
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
@ -1220,6 +1246,7 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
phys->ops.disable(phys);
}
/* after phys waits for frame-done, should be no more frames pending */
if (atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
DPU_ERROR("enc%d timeout pending\n", drm_enc->base.id);
@ -1234,6 +1261,11 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS && priv->dp) {
if (msm_dp_display_disable(priv->dp, drm_enc))
DPU_ERROR_ENC(dpu_enc, "dp display disable failed\n");
}
mutex_unlock(&dpu_enc->enc_lock);
}
@ -1880,24 +1912,13 @@ static int _dpu_encoder_status_show(struct seq_file *s, void *data)
return 0;
}
static int _dpu_encoder_debugfs_status_open(struct inode *inode,
struct file *file)
{
return single_open(file, _dpu_encoder_status_show, inode->i_private);
}
DEFINE_SHOW_ATTRIBUTE(_dpu_encoder_status);
static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
int i;
static const struct file_operations debugfs_status_fops = {
.open = _dpu_encoder_debugfs_status_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
char name[DPU_NAME_SIZE];
if (!drm_enc->dev) {
@ -1913,7 +1934,7 @@ static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
/* don't error check these */
debugfs_create_file("status", 0600,
dpu_enc->debugfs_root, dpu_enc, &debugfs_status_fops);
dpu_enc->debugfs_root, dpu_enc, &_dpu_encoder_status_fops);
for (i = 0; i < dpu_enc->num_phys_encs; i++)
if (dpu_enc->phys_encs[i]->ops.late_register)
@ -2008,7 +2029,7 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
{
int ret = 0;
int i = 0;
enum dpu_intf_type intf_type;
enum dpu_intf_type intf_type = INTF_NONE;
struct dpu_enc_phys_init_params phys_params;
if (!dpu_enc) {
@ -2030,9 +2051,9 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
case DRM_MODE_ENCODER_DSI:
intf_type = INTF_DSI;
break;
default:
DPU_ERROR_ENC(dpu_enc, "unsupported display interface type\n");
return -EINVAL;
case DRM_MODE_ENCODER_TMDS:
intf_type = INTF_DP;
break;
}
WARN_ON(disp_info->num_of_h_tiles < 1);

View File

@ -100,6 +100,14 @@ static void drm_mode_to_intf_timing_params(
* display_v_end -= mode->hsync_start - mode->hdisplay;
* }
*/
/* for DP/EDP, Shift timings to align it to bottom right */
if ((phys_enc->hw_intf->cap->type == INTF_DP) ||
(phys_enc->hw_intf->cap->type == INTF_EDP)) {
timing->h_back_porch += timing->h_front_porch;
timing->h_front_porch = 0;
timing->v_back_porch += timing->v_front_porch;
timing->v_front_porch = 0;
}
}
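A quick illustration of the porch shift (invented numbers, not from the patch):
/*
 * With h_front_porch = 88 and h_back_porch = 148, the DP/eDP case
 * yields h_back_porch = 236 and h_front_porch = 0. The horizontal
 * total, and therefore the link timing, is unchanged; only the
 * position of the active region within the frame moves. The
 * vertical porches are folded the same way.
 */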
static u32 get_horizontal_total(const struct intf_timing_params *timing)
@ -298,7 +306,6 @@ static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
struct dpu_hw_ctl *hw_ctl;
unsigned long lock_flags;
u32 flush_register = 0;
int new_cnt = -1, old_cnt = -1;
hw_ctl = phys_enc->hw_ctl;
@ -308,7 +315,7 @@ static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
phys_enc->parent_ops->handle_vblank_virt(phys_enc->parent,
phys_enc);
old_cnt = atomic_read(&phys_enc->pending_kickoff_cnt);
atomic_read(&phys_enc->pending_kickoff_cnt);
/*
* only decrement the pending flush count if we've actually flushed
@ -320,8 +327,7 @@ static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
flush_register = hw_ctl->ops.get_flush_register(hw_ctl);
if (!(flush_register & hw_ctl->ops.get_pending_flush(hw_ctl)))
new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt,
-1, 0);
atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
/* Signal any waiting atomic commit thread */

View File

@ -684,7 +684,8 @@ static const struct dpu_perf_cfg sc7180_perf_data = {
.max_bw_high = 6800000,
.min_core_ib = 2400000,
.min_llcc_ib = 800000,
.min_dram_ib = 800000,
.min_dram_ib = 1600000,
.min_prefill_lines = 24,
.danger_lut_tbl = {0xff, 0xffff, 0x0},
.qos_lut_tbl = {
{.nentry = ARRAY_SIZE(sc7180_qos_linear),
@ -701,6 +702,8 @@ static const struct dpu_perf_cfg sc7180_perf_data = {
{.rd_enable = 1, .wr_enable = 1},
{.rd_enable = 1, .wr_enable = 0}
},
.clk_inefficiency_factor = 105,
.bw_inefficiency_factor = 120,
};
static const struct dpu_perf_cfg sm8150_perf_data = {

View File

@ -659,6 +659,8 @@ struct dpu_perf_cdp_cfg {
* @downscaling_prefill_lines downscaling latency in lines
* @amortizable_theshold minimum y position for traffic shaping prefill
* @min_prefill_lines minimum pipeline latency in lines
* @clk_inefficiency_factor DPU src clock inefficiency factor
* @bw_inefficiency_factor DPU axi bus bw inefficiency factor
* @safe_lut_tbl: LUT tables for safe signals
* @danger_lut_tbl: LUT tables for danger signals
* @qos_lut_tbl: LUT tables for QoS signals
@ -683,6 +685,8 @@ struct dpu_perf_cfg {
u32 downscaling_prefill_lines;
u32 amortizable_threshold;
u32 min_prefill_lines;
u32 clk_inefficiency_factor;
u32 bw_inefficiency_factor;
u32 safe_lut_tbl[DPU_QOS_LUT_USAGE_MAX];
u32 danger_lut_tbl[DPU_QOS_LUT_USAGE_MAX];
struct dpu_qos_lut_tbl qos_lut_tbl[DPU_QOS_LUT_USAGE_MAX];

View File

@ -85,30 +85,17 @@ static int _dpu_danger_signal_status(struct seq_file *s,
return 0;
}
#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix) \
static int __prefix ## _open(struct inode *inode, struct file *file) \
{ \
return single_open(file, __prefix ## _show, inode->i_private); \
} \
static const struct file_operations __prefix ## _fops = { \
.owner = THIS_MODULE, \
.open = __prefix ## _open, \
.release = single_release, \
.read = seq_read, \
.llseek = seq_lseek, \
}
static int dpu_debugfs_danger_stats_show(struct seq_file *s, void *v)
{
return _dpu_danger_signal_status(s, true);
}
DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_danger_stats);
DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_danger_stats);
static int dpu_debugfs_safe_stats_show(struct seq_file *s, void *v)
{
return _dpu_danger_signal_status(s, false);
}
DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_safe_stats);
DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_safe_stats);
static void dpu_debugfs_danger_init(struct dpu_kms *dpu_kms,
struct dentry *parent)
@ -195,10 +182,15 @@ static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
void *p = dpu_hw_util_get_log_mask_ptr();
struct dentry *entry;
struct drm_device *dev;
struct msm_drm_private *priv;
if (!p)
return -EINVAL;
dev = dpu_kms->dev;
priv = dev->dev_private;
entry = debugfs_create_dir("debug", minor->debugfs_root);
debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, entry, p);
@ -207,6 +199,9 @@ static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
dpu_debugfs_vbif_init(dpu_kms, entry);
dpu_debugfs_core_irq_init(dpu_kms, entry);
if (priv->dp)
msm_dp_debugfs_init(priv->dp, minor);
return dpu_core_perf_debugfs_init(dpu_kms, entry);
}
#endif
@ -290,6 +285,28 @@ static int dpu_kms_global_obj_init(struct dpu_kms *dpu_kms)
return 0;
}
static int dpu_kms_parse_data_bus_icc_path(struct dpu_kms *dpu_kms)
{
struct icc_path *path0;
struct icc_path *path1;
struct drm_device *dev = dpu_kms->dev;
path0 = of_icc_get(dev->dev, "mdp0-mem");
path1 = of_icc_get(dev->dev, "mdp1-mem");
if (IS_ERR_OR_NULL(path0))
return PTR_ERR_OR_ZERO(path0);
dpu_kms->path[0] = path0;
dpu_kms->num_paths = 1;
if (!IS_ERR_OR_NULL(path1)) {
dpu_kms->path[1] = path1;
dpu_kms->num_paths++;
}
return 0;
}
static int dpu_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
return dpu_crtc_vblank(crtc, true);
@ -479,6 +496,33 @@ static int _dpu_kms_initialize_dsi(struct drm_device *dev,
return rc;
}
static int _dpu_kms_initialize_displayport(struct drm_device *dev,
struct msm_drm_private *priv,
struct dpu_kms *dpu_kms)
{
struct drm_encoder *encoder = NULL;
int rc = 0;
if (!priv->dp)
return rc;
encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_TMDS);
if (IS_ERR(encoder)) {
DPU_ERROR("encoder init failed for dsi display\n");
return PTR_ERR(encoder);
}
rc = msm_dp_modeset_init(priv->dp, dev, encoder);
if (rc) {
DPU_ERROR("modeset_init failed for DP, rc = %d\n", rc);
drm_encoder_cleanup(encoder);
return rc;
}
priv->encoders[priv->num_encoders++] = encoder;
return rc;
}
/**
* _dpu_kms_setup_displays - create encoders, bridges and connectors
* for underlying displays
@ -491,12 +535,21 @@ static int _dpu_kms_setup_displays(struct drm_device *dev,
struct msm_drm_private *priv,
struct dpu_kms *dpu_kms)
{
/**
* Extend this function to initialize other
* types of displays
*/
int rc = 0;
return _dpu_kms_initialize_dsi(dev, priv, dpu_kms);
rc = _dpu_kms_initialize_dsi(dev, priv, dpu_kms);
if (rc) {
DPU_ERROR("initialize_dsi failed, rc = %d\n", rc);
return rc;
}
rc = _dpu_kms_initialize_displayport(dev, priv, dpu_kms);
if (rc) {
DPU_ERROR("initialize_DP failed, rc = %d\n", rc);
return rc;
}
return rc;
}
static void _dpu_kms_drm_obj_destroy(struct dpu_kms *dpu_kms)
@ -681,13 +734,20 @@ static void _dpu_kms_set_encoder_mode(struct msm_kms *kms,
info.capabilities = cmd_mode ? MSM_DISPLAY_CAP_CMD_MODE :
MSM_DISPLAY_CAP_VID_MODE;
/* TODO: No support for DSI swap */
for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
if (priv->dsi[i]) {
info.h_tile_instance[info.num_of_h_tiles] = i;
info.num_of_h_tiles++;
switch (info.intf_type) {
case DRM_MODE_ENCODER_DSI:
/* TODO: No support for DSI swap */
for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
if (priv->dsi[i]) {
info.h_tile_instance[info.num_of_h_tiles] = i;
info.num_of_h_tiles++;
}
}
}
break;
case DRM_MODE_ENCODER_TMDS:
info.num_of_h_tiles = 1;
break;
}
rc = dpu_encoder_setup(encoder->dev, encoder, &info);
if (rc)
@ -709,6 +769,23 @@ static void dpu_irq_preinstall(struct msm_kms *kms)
dpu_core_irq_preinstall(dpu_kms);
}
static int dpu_irq_postinstall(struct msm_kms *kms)
{
struct msm_drm_private *priv;
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
if (!dpu_kms || !dpu_kms->dev)
return -EINVAL;
priv = dpu_kms->dev->dev_private;
if (!priv)
return -EINVAL;
msm_dp_irq_postinstall(priv->dp);
return 0;
}
static void dpu_irq_uninstall(struct msm_kms *kms)
{
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
@ -719,6 +796,7 @@ static void dpu_irq_uninstall(struct msm_kms *kms)
static const struct msm_kms_funcs kms_funcs = {
.hw_init = dpu_kms_hw_init,
.irq_preinstall = dpu_irq_preinstall,
.irq_postinstall = dpu_irq_postinstall,
.irq_uninstall = dpu_irq_uninstall,
.irq = dpu_irq,
.enable_commit = dpu_kms_enable_commit,
@ -952,6 +1030,9 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
dpu_vbif_init_memtypes(dpu_kms);
if (of_device_is_compatible(dev->dev->of_node, "qcom,sc7180-mdss"))
dpu_kms_parse_data_bus_icc_path(dpu_kms);
pm_runtime_put_sync(&dpu_kms->pdev->dev);
return 0;
@ -1079,7 +1160,7 @@ static int dpu_dev_remove(struct platform_device *pdev)
static int __maybe_unused dpu_runtime_suspend(struct device *dev)
{
int rc = -1;
int i, rc = -1;
struct platform_device *pdev = to_platform_device(dev);
struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
struct dss_module_power *mp = &dpu_kms->mp;
@ -1090,6 +1171,9 @@ static int __maybe_unused dpu_runtime_suspend(struct device *dev)
if (rc)
DPU_ERROR("clock disable failed rc:%d\n", rc);
for (i = 0; i < dpu_kms->num_paths; i++)
icc_set_bw(dpu_kms->path[i], 0, 0);
return rc;
}
@ -1101,8 +1185,15 @@ static int __maybe_unused dpu_runtime_resume(struct device *dev)
struct drm_encoder *encoder;
struct drm_device *ddev;
struct dss_module_power *mp = &dpu_kms->mp;
int i;
ddev = dpu_kms->dev;
/* Min vote of BW is required before turning on AXI clk */
for (i = 0; i < dpu_kms->num_paths; i++)
icc_set_bw(dpu_kms->path[i], 0,
dpu_kms->catalog->perf.min_dram_ib);
rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
if (rc) {
DPU_ERROR("clock enable failed rc:%d\n", rc);

View File

@ -8,6 +8,8 @@
#ifndef __DPU_KMS_H__
#define __DPU_KMS_H__
#include <linux/interconnect.h>
#include <drm/drm_drv.h>
#include "msm_drv.h"
@ -140,6 +142,8 @@ struct dpu_kms {
* when disabled.
*/
atomic_t bandwidth_ref;
struct icc_path *path[2];
u32 num_paths;
};
struct vsync_info {

View File

@ -8,7 +8,6 @@
#include <linux/irqdesc.h>
#include <linux/irqchip/chained_irq.h>
#include "dpu_kms.h"
#include <linux/interconnect.h>
#define to_dpu_mdss(x) container_of(x, struct dpu_mdss, base)
@ -277,9 +276,11 @@ int dpu_mdss_init(struct drm_device *dev)
DRM_DEBUG("mapped mdss address space @%pK\n", dpu_mdss->mmio);
ret = dpu_mdss_parse_data_bus_icc_path(dev, dpu_mdss);
if (ret)
return ret;
if (!of_device_is_compatible(dev->dev->of_node, "qcom,sc7180-mdss")) {
ret = dpu_mdss_parse_data_bus_icc_path(dev, dpu_mdss);
if (ret)
return ret;
}
mp = &dpu_mdss->mp;
ret = msm_dss_parse_clock(pdev, mp);

View File

@ -131,6 +131,86 @@ static struct dpu_kms *_dpu_plane_get_kms(struct drm_plane *plane)
return to_dpu_kms(priv->kms);
}
/**
* _dpu_plane_calc_bw - calculate bandwidth required for a plane
* @plane: Pointer to drm plane.
* @fb: Pointer to the framebuffer the plane fetches from.
* Result: Updates calculated bandwidth in the plane state.
* BW Equation: src_w * src_h * bpp * fps * (v_total / v_dest)
* Prefill BW Equation: line src bytes * line_time
*/
static void _dpu_plane_calc_bw(struct drm_plane *plane,
struct drm_framebuffer *fb)
{
struct dpu_plane *pdpu = to_dpu_plane(plane);
struct dpu_plane_state *pstate;
struct drm_display_mode *mode;
const struct dpu_format *fmt = NULL;
struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
int src_width, src_height, dst_height, fps;
u64 plane_prefill_bw;
u64 plane_bw;
u32 hw_latency_lines;
u64 scale_factor;
int vbp, vpw;
pstate = to_dpu_plane_state(plane->state);
mode = &plane->state->crtc->mode;
fmt = dpu_get_dpu_format_ext(fb->format->format, fb->modifier);
src_width = drm_rect_width(&pdpu->pipe_cfg.src_rect);
src_height = drm_rect_height(&pdpu->pipe_cfg.src_rect);
dst_height = drm_rect_height(&pdpu->pipe_cfg.dst_rect);
fps = drm_mode_vrefresh(mode);
vbp = mode->vtotal - mode->vsync_end;
vpw = mode->vsync_end - mode->vsync_start;
hw_latency_lines = dpu_kms->catalog->perf.min_prefill_lines;
scale_factor = src_height > dst_height ?
mult_frac(src_height, 1, dst_height) : 1;
plane_bw =
src_width * mode->vtotal * fps * fmt->bpp *
scale_factor;
plane_prefill_bw =
src_width * hw_latency_lines * fps * fmt->bpp *
scale_factor * mode->vtotal;
do_div(plane_prefill_bw, (vbp+vpw));
pstate->plane_fetch_bw = max(plane_bw, plane_prefill_bw);
}
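A rough worked example of the bandwidth equation (illustrative numbers; assumes fmt->bpp is in bytes per pixel):
/*
 * A 1920-wide fetch on a 1080p60 mode (vtotal = 1125) with a
 * 4-byte-per-pixel format and no downscaling (scale_factor = 1):
 *
 *         plane_bw = 1920 * 1125 * 60 * 4 = ~518 MB/s
 *
 * The prefill term scales the same product by
 * hw_latency_lines / (vbp + vpw) instead of a full frame, and the
 * larger of the two values lands in pstate->plane_fetch_bw.
 */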
/**
* _dpu_plane_calc_clk - calculate clock required for a plane
* @plane: Pointer to drm plane.
* Result: Updates calculated clock in the plane state.
* Clock equation: dst_w * v_total * fps * (src_h / dst_h)
*/
static void _dpu_plane_calc_clk(struct drm_plane *plane)
{
struct dpu_plane *pdpu = to_dpu_plane(plane);
struct dpu_plane_state *pstate;
struct drm_display_mode *mode;
int dst_width, src_height, dst_height, fps;
pstate = to_dpu_plane_state(plane->state);
mode = &plane->state->crtc->mode;
src_height = drm_rect_height(&pdpu->pipe_cfg.src_rect);
dst_width = drm_rect_width(&pdpu->pipe_cfg.dst_rect);
dst_height = drm_rect_height(&pdpu->pipe_cfg.dst_rect);
fps = drm_mode_vrefresh(mode);
pstate->plane_clk =
dst_width * mode->vtotal * fps;
if (src_height > dst_height) {
pstate->plane_clk *= src_height;
do_div(pstate->plane_clk, dst_height);
}
}
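The clock equation works out similarly (again with invented numbers):
/*
 * A 960-wide destination on a 1080p60 mode (vtotal = 1125) needs
 *
 *         plane_clk = 960 * 1125 * 60 = 64.8 MHz
 *
 * and a 2:1 vertical downscale (src_h = 2 * dst_h) doubles that
 * to 129.6 MHz.
 */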
/**
* _dpu_plane_calc_fill_level - calculate fill level of the given source format
* @plane: Pointer to drm plane
@ -1102,6 +1182,10 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
}
_dpu_plane_set_qos_remap(plane);
_dpu_plane_calc_bw(plane, fb);
_dpu_plane_calc_clk(plane);
}
static void _dpu_plane_atomic_disable(struct drm_plane *plane)

View File

@ -25,6 +25,8 @@
* @scaler3_cfg: configuration data for scaler3
* @pixel_ext: configuration data for pixel extensions
* @cdp_cfg: CDP configuration
* @plane_fetch_bw: calculated BW per plane
* @plane_clk: calculated clk per plane
*/
struct dpu_plane_state {
struct drm_plane_state base;
@ -39,6 +41,8 @@ struct dpu_plane_state {
struct dpu_hw_pixel_ext pixel_ext;
struct dpu_hw_pipe_cdp_cfg cdp_cfg;
u64 plane_fetch_bw;
u64 plane_clk;
};
/**

View File

@ -25,54 +25,9 @@ static struct mdp4_kms *get_kms(struct drm_encoder *encoder)
return to_mdp4_kms(to_mdp_kms(priv->kms));
}
#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
#include <mach/board.h>
/* not ironically named at all.. no, really.. */
static void bs_init(struct mdp4_dtv_encoder *mdp4_dtv_encoder)
{
struct drm_device *dev = mdp4_dtv_encoder->base.dev;
struct lcdc_platform_data *dtv_pdata = mdp4_find_pdata("dtv.0");
if (!dtv_pdata) {
DRM_DEV_ERROR(dev->dev, "could not find dtv pdata\n");
return;
}
if (dtv_pdata->bus_scale_table) {
mdp4_dtv_encoder->bsc = msm_bus_scale_register_client(
dtv_pdata->bus_scale_table);
DBG("bus scale client: %08x", mdp4_dtv_encoder->bsc);
DBG("lcdc_power_save: %p", dtv_pdata->lcdc_power_save);
if (dtv_pdata->lcdc_power_save)
dtv_pdata->lcdc_power_save(1);
}
}
static void bs_fini(struct mdp4_dtv_encoder *mdp4_dtv_encoder)
{
if (mdp4_dtv_encoder->bsc) {
msm_bus_scale_unregister_client(mdp4_dtv_encoder->bsc);
mdp4_dtv_encoder->bsc = 0;
}
}
static void bs_set(struct mdp4_dtv_encoder *mdp4_dtv_encoder, int idx)
{
if (mdp4_dtv_encoder->bsc) {
DBG("set bus scaling: %d", idx);
msm_bus_scale_client_update_request(mdp4_dtv_encoder->bsc, idx);
}
}
#else
static void bs_init(struct mdp4_dtv_encoder *mdp4_dtv_encoder) {}
static void bs_fini(struct mdp4_dtv_encoder *mdp4_dtv_encoder) {}
static void bs_set(struct mdp4_dtv_encoder *mdp4_dtv_encoder, int idx) {}
#endif
static void mdp4_dtv_encoder_destroy(struct drm_encoder *encoder)
{
struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
bs_fini(mdp4_dtv_encoder);
drm_encoder_cleanup(encoder);
kfree(mdp4_dtv_encoder);
}
@ -162,8 +117,6 @@ static void mdp4_dtv_encoder_disable(struct drm_encoder *encoder)
clk_disable_unprepare(mdp4_dtv_encoder->hdmi_clk);
clk_disable_unprepare(mdp4_dtv_encoder->mdp_clk);
bs_set(mdp4_dtv_encoder, 0);
mdp4_dtv_encoder->enabled = false;
}
@ -185,8 +138,6 @@ static void mdp4_dtv_encoder_enable(struct drm_encoder *encoder)
MDP4_DMA_CONFIG_PACK(0x21));
mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV, 1);
bs_set(mdp4_dtv_encoder, 1);
DBG("setting mdp_clk=%lu", pc);
ret = clk_set_rate(mdp4_dtv_encoder->mdp_clk, pc);
@ -252,8 +203,6 @@ struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev)
goto fail;
}
bs_init(mdp4_dtv_encoder);
return encoder;
fail:

View File

@ -222,17 +222,4 @@ static inline struct clk *mpd4_lvds_pll_init(struct drm_device *dev)
}
#endif
#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
/* bus scaling data is associated with extra pointless platform devices,
* "dtv", etc.. this is a bit of a hack, but we need a way for encoders
* to find their pdata to make the bus-scaling stuff work.
*/
static inline void *mdp4_find_pdata(const char *devname)
{
struct device *dev;
dev = bus_find_device_by_name(&platform_bus_type, NULL, devname);
return dev ? dev->platform_data : NULL;
}
#endif
#endif /* __MDP4_KMS_H__ */

View File

@ -30,51 +30,10 @@ static struct mdp4_kms *get_kms(struct drm_encoder *encoder)
return to_mdp4_kms(to_mdp_kms(priv->kms));
}
#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
#include <mach/board.h>
static void bs_init(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder)
{
struct drm_device *dev = mdp4_lcdc_encoder->base.dev;
struct lcdc_platform_data *lcdc_pdata = mdp4_find_pdata("lvds.0");
if (!lcdc_pdata) {
DRM_DEV_ERROR(dev->dev, "could not find lvds pdata\n");
return;
}
if (lcdc_pdata->bus_scale_table) {
mdp4_lcdc_encoder->bsc = msm_bus_scale_register_client(
lcdc_pdata->bus_scale_table);
DBG("lvds : bus scale client: %08x", mdp4_lcdc_encoder->bsc);
}
}
static void bs_fini(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder)
{
if (mdp4_lcdc_encoder->bsc) {
msm_bus_scale_unregister_client(mdp4_lcdc_encoder->bsc);
mdp4_lcdc_encoder->bsc = 0;
}
}
static void bs_set(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder, int idx)
{
if (mdp4_lcdc_encoder->bsc) {
DBG("set bus scaling: %d", idx);
msm_bus_scale_client_update_request(mdp4_lcdc_encoder->bsc, idx);
}
}
#else
static void bs_init(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder) {}
static void bs_fini(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder) {}
static void bs_set(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder, int idx) {}
#endif
static void mdp4_lcdc_encoder_destroy(struct drm_encoder *encoder)
{
struct mdp4_lcdc_encoder *mdp4_lcdc_encoder =
to_mdp4_lcdc_encoder(encoder);
bs_fini(mdp4_lcdc_encoder);
drm_encoder_cleanup(encoder);
kfree(mdp4_lcdc_encoder);
}
@ -348,8 +307,6 @@ static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder)
DRM_DEV_ERROR(dev->dev, "failed to disable regulator: %d\n", ret);
}
bs_set(mdp4_lcdc_encoder, 0);
mdp4_lcdc_encoder->enabled = false;
}
@ -382,8 +339,6 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
mdp4_crtc_set_config(encoder->crtc, config);
mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV, 0);
bs_set(mdp4_lcdc_encoder, 1);
for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) {
ret = regulator_enable(mdp4_lcdc_encoder->regs[i]);
if (ret)
@ -480,8 +435,6 @@ struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
}
mdp4_lcdc_encoder->regs[2] = reg;
bs_init(mdp4_lcdc_encoder);
return encoder;
fail:

View File

@ -14,27 +14,6 @@ static struct mdp5_kms *get_kms(struct drm_encoder *encoder)
return to_mdp5_kms(to_mdp_kms(priv->kms));
}
#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
#include <mach/board.h>
#include <linux/msm-bus.h>
#include <linux/msm-bus-board.h>
static void bs_set(struct mdp5_encoder *mdp5_cmd_enc, int idx)
{
if (mdp5_cmd_enc->bsc) {
DBG("set bus scaling: %d", idx);
/* HACK: scaling down, and then immediately back up
* seems to leave things broken (underflow).. so
* never disable:
*/
idx = 1;
msm_bus_scale_client_update_request(mdp5_cmd_enc->bsc, idx);
}
}
#else
static void bs_set(struct mdp5_encoder *mdp5_cmd_enc, int idx) {}
#endif
#define VSYNC_CLK_RATE 19200000
static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
struct drm_display_mode *mode)
@ -146,8 +125,6 @@ void mdp5_cmd_encoder_disable(struct drm_encoder *encoder)
mdp5_ctl_set_encoder_state(ctl, pipeline, false);
mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf), true);
bs_set(mdp5_cmd_enc, 0);
mdp5_cmd_enc->enabled = false;
}
@ -161,7 +138,6 @@ void mdp5_cmd_encoder_enable(struct drm_encoder *encoder)
if (WARN_ON(mdp5_cmd_enc->enabled))
return;
bs_set(mdp5_cmd_enc, 1);
if (pingpong_tearcheck_enable(encoder))
return;

View File

@ -16,72 +16,9 @@ static struct mdp5_kms *get_kms(struct drm_encoder *encoder)
return to_mdp5_kms(to_mdp_kms(priv->kms));
}
#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
#include <mach/board.h>
#include <mach/msm_bus.h>
#include <mach/msm_bus_board.h>
#define MDP_BUS_VECTOR_ENTRY(ab_val, ib_val) \
{ \
.src = MSM_BUS_MASTER_MDP_PORT0, \
.dst = MSM_BUS_SLAVE_EBI_CH0, \
.ab = (ab_val), \
.ib = (ib_val), \
}
static struct msm_bus_vectors mdp_bus_vectors[] = {
MDP_BUS_VECTOR_ENTRY(0, 0),
MDP_BUS_VECTOR_ENTRY(2000000000, 2000000000),
};
static struct msm_bus_paths mdp_bus_usecases[] = { {
.num_paths = 1,
.vectors = &mdp_bus_vectors[0],
}, {
.num_paths = 1,
.vectors = &mdp_bus_vectors[1],
} };
static struct msm_bus_scale_pdata mdp_bus_scale_table = {
.usecase = mdp_bus_usecases,
.num_usecases = ARRAY_SIZE(mdp_bus_usecases),
.name = "mdss_mdp",
};
static void bs_init(struct mdp5_encoder *mdp5_encoder)
{
mdp5_encoder->bsc = msm_bus_scale_register_client(
&mdp_bus_scale_table);
DBG("bus scale client: %08x", mdp5_encoder->bsc);
}
static void bs_fini(struct mdp5_encoder *mdp5_encoder)
{
if (mdp5_encoder->bsc) {
msm_bus_scale_unregister_client(mdp5_encoder->bsc);
mdp5_encoder->bsc = 0;
}
}
static void bs_set(struct mdp5_encoder *mdp5_encoder, int idx)
{
if (mdp5_encoder->bsc) {
DBG("set bus scaling: %d", idx);
/* HACK: scaling down, and then immediately back up
* seems to leave things broken (underflow).. so
* never disable:
*/
idx = 1;
msm_bus_scale_client_update_request(mdp5_encoder->bsc, idx);
}
}
#else
static void bs_init(struct mdp5_encoder *mdp5_encoder) {}
static void bs_fini(struct mdp5_encoder *mdp5_encoder) {}
static void bs_set(struct mdp5_encoder *mdp5_encoder, int idx) {}
#endif
static void mdp5_encoder_destroy(struct drm_encoder *encoder)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
bs_fini(mdp5_encoder);
drm_encoder_cleanup(encoder);
kfree(mdp5_encoder);
}
@ -222,8 +159,6 @@ static void mdp5_vid_encoder_disable(struct drm_encoder *encoder)
*/
mdp_irq_wait(&mdp5_kms->base, intf2vblank(mixer, intf));
bs_set(mdp5_encoder, 0);
mdp5_encoder->enabled = false;
}
@ -240,7 +175,6 @@ static void mdp5_vid_encoder_enable(struct drm_encoder *encoder)
if (WARN_ON(mdp5_encoder->enabled))
return;
bs_set(mdp5_encoder, 1);
spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 1);
spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
@ -426,8 +360,6 @@ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
drm_encoder_helper_add(encoder, &mdp5_encoder_helper_funcs);
bs_init(mdp5_encoder);
return encoder;
fail:

View File

@ -0,0 +1,638 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__
#include <linux/of_platform.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include "dp_catalog.h"
#include "dp_audio.h"
#include "dp_panel.h"
#include "dp_display.h"
#define HEADER_BYTE_2_BIT 0
#define PARITY_BYTE_2_BIT 8
#define HEADER_BYTE_1_BIT 16
#define PARITY_BYTE_1_BIT 24
#define HEADER_BYTE_3_BIT 16
#define PARITY_BYTE_3_BIT 24
struct dp_audio_private {
struct platform_device *audio_pdev;
struct platform_device *pdev;
struct dp_catalog *catalog;
struct dp_panel *panel;
bool engine_on;
u32 channels;
struct dp_audio dp_audio;
};
static u8 dp_audio_get_g0_value(u8 data)
{
u8 c[4];
u8 g[4];
u8 ret_data = 0;
u8 i;
for (i = 0; i < 4; i++)
c[i] = (data >> i) & 0x01;
g[0] = c[3];
g[1] = c[0] ^ c[3];
g[2] = c[1];
g[3] = c[2];
for (i = 0; i < 4; i++)
ret_data = ((g[i] & 0x01) << i) | ret_data;
return ret_data;
}
static u8 dp_audio_get_g1_value(u8 data)
{
u8 c[4];
u8 g[4];
u8 ret_data = 0;
u8 i;
for (i = 0; i < 4; i++)
c[i] = (data >> i) & 0x01;
g[0] = c[0] ^ c[3];
g[1] = c[0] ^ c[1] ^ c[3];
g[2] = c[1] ^ c[2];
g[3] = c[2] ^ c[3];
for (i = 0; i < 4; i++)
ret_data = ((g[i] & 0x01) << i) | ret_data;
return ret_data;
}
static u8 dp_audio_calculate_parity(u32 data)
{
u8 x0 = 0;
u8 x1 = 0;
u8 ci = 0;
u8 iData = 0;
u8 i = 0;
u8 parity_byte;
u8 num_byte = (data & 0xFF00) > 0 ? 8 : 2;
for (i = 0; i < num_byte; i++) {
iData = (data >> i*4) & 0xF;
ci = iData ^ x1;
x1 = x0 ^ dp_audio_get_g1_value(ci);
x0 = dp_audio_get_g0_value(ci);
}
parity_byte = x1 | (x0 << 4);
return parity_byte;
}
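A usage sketch of the parity helper, mirroring how the SDP setup functions below call it (values illustrative):
/*
 *         u8 hb1 = 0x02;    stream SDP header byte 1
 *         u8 pb1 = dp_audio_calculate_parity(hb1);
 *         u32 packed = (hb1 << HEADER_BYTE_1_BIT) |
 *                      (pb1 << PARITY_BYTE_1_BIT);
 *
 * Each header byte gets a parity byte built from the G0/G1 nibble
 * transforms above (x1 in the low nibble, x0 in the high nibble),
 * packed into the same 32-bit register word as the header byte.
 */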
static u32 dp_audio_get_header(struct dp_catalog *catalog,
enum dp_catalog_audio_sdp_type sdp,
enum dp_catalog_audio_header_type header)
{
catalog->sdp_type = sdp;
catalog->sdp_header = header;
dp_catalog_audio_get_header(catalog);
return catalog->audio_data;
}
static void dp_audio_set_header(struct dp_catalog *catalog,
u32 data,
enum dp_catalog_audio_sdp_type sdp,
enum dp_catalog_audio_header_type header)
{
catalog->sdp_type = sdp;
catalog->sdp_header = header;
catalog->audio_data = data;
dp_catalog_audio_set_header(catalog);
}
static void dp_audio_stream_sdp(struct dp_audio_private *audio)
{
struct dp_catalog *catalog = audio->catalog;
u32 value, new_value;
u8 parity_byte;
/* Config header and parity byte 1 */
value = dp_audio_get_header(catalog,
DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_1);
new_value = 0x02;
parity_byte = dp_audio_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_1_BIT)
| (parity_byte << PARITY_BYTE_1_BIT));
DRM_DEBUG_DP("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_1);
/* Config header and parity byte 2 */
value = dp_audio_get_header(catalog,
DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_2);
new_value = value;
parity_byte = dp_audio_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_2_BIT)
| (parity_byte << PARITY_BYTE_2_BIT));
DRM_DEBUG_DP("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_2);
/* Config header and parity byte 3 */
value = dp_audio_get_header(catalog,
DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_3);
new_value = audio->channels - 1;
parity_byte = dp_audio_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_3_BIT)
| (parity_byte << PARITY_BYTE_3_BIT));
DRM_DEBUG_DP("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_3);
}
static void dp_audio_timestamp_sdp(struct dp_audio_private *audio)
{
struct dp_catalog *catalog = audio->catalog;
u32 value, new_value;
u8 parity_byte;
/* Config header and parity byte 1 */
value = dp_audio_get_header(catalog,
DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_1);
new_value = 0x1;
parity_byte = dp_audio_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_1_BIT)
| (parity_byte << PARITY_BYTE_1_BIT));
DRM_DEBUG_DP("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_1);
/* Config header and parity byte 2 */
value = dp_audio_get_header(catalog,
DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_2);
new_value = 0x17;
parity_byte = dp_audio_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_2_BIT)
| (parity_byte << PARITY_BYTE_2_BIT));
DRM_DEBUG_DP("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_2);
/* Config header and parity byte 3 */
value = dp_audio_get_header(catalog,
DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_3);
new_value = (0x0 | (0x11 << 2));
parity_byte = dp_audio_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_3_BIT)
| (parity_byte << PARITY_BYTE_3_BIT));
DRM_DEBUG_DP("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_3);
}
static void dp_audio_infoframe_sdp(struct dp_audio_private *audio)
{
struct dp_catalog *catalog = audio->catalog;
u32 value, new_value;
u8 parity_byte;
/* Config header and parity byte 1 */
value = dp_audio_get_header(catalog,
DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_1);
new_value = 0x84;
parity_byte = dp_audio_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_1_BIT)
| (parity_byte << PARITY_BYTE_1_BIT));
DRM_DEBUG_DP("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_1);
/* Config header and parity byte 2 */
value = dp_audio_get_header(catalog,
DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_2);
new_value = 0x1b;
parity_byte = dp_audio_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_2_BIT)
| (parity_byte << PARITY_BYTE_2_BIT));
DRM_DEBUG_DP("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_2);
/* Config header and parity byte 3 */
value = dp_audio_get_header(catalog,
DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_3);
new_value = (0x0 | (0x11 << 2));
parity_byte = dp_audio_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_3_BIT)
| (parity_byte << PARITY_BYTE_3_BIT));
DRM_DEBUG_DP("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_3);
}
static void dp_audio_copy_management_sdp(struct dp_audio_private *audio)
{
struct dp_catalog *catalog = audio->catalog;
u32 value, new_value;
u8 parity_byte;
/* Config header and parity byte 1 */
value = dp_audio_get_header(catalog,
DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_1);
new_value = 0x05;
parity_byte = dp_audio_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_1_BIT)
| (parity_byte << PARITY_BYTE_1_BIT));
DRM_DEBUG_DP("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_1);
/* Config header and parity byte 2 */
value = dp_audio_get_header(catalog,
DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_2);
new_value = 0x0F;
parity_byte = dp_audio_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_2_BIT)
| (parity_byte << PARITY_BYTE_2_BIT));
DRM_DEBUG_DP("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_2);
/* Config header and parity byte 3 */
value = dp_audio_get_header(catalog,
DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_3);
new_value = 0x0;
parity_byte = dp_audio_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_3_BIT)
| (parity_byte << PARITY_BYTE_3_BIT));
DRM_DEBUG_DP("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_3);
}
static void dp_audio_isrc_sdp(struct dp_audio_private *audio)
{
struct dp_catalog *catalog = audio->catalog;
u32 value, new_value;
u8 parity_byte;
/* Config header and parity byte 1 */
value = dp_audio_get_header(catalog,
DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_1);
new_value = 0x06;
parity_byte = dp_audio_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_1_BIT)
| (parity_byte << PARITY_BYTE_1_BIT));
DRM_DEBUG_DP("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_1);
/* Config header and parity byte 2 */
value = dp_audio_get_header(catalog,
DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_2);
new_value = 0x0F;
parity_byte = dp_audio_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_2_BIT)
| (parity_byte << PARITY_BYTE_2_BIT));
DRM_DEBUG_DP("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_2);
}
static void dp_audio_setup_sdp(struct dp_audio_private *audio)
{
dp_catalog_audio_config_sdp(audio->catalog);
dp_audio_stream_sdp(audio);
dp_audio_timestamp_sdp(audio);
dp_audio_infoframe_sdp(audio);
dp_audio_copy_management_sdp(audio);
dp_audio_isrc_sdp(audio);
}
static void dp_audio_setup_acr(struct dp_audio_private *audio)
{
u32 select = 0;
struct dp_catalog *catalog = audio->catalog;
switch (audio->dp_audio.bw_code) {
case DP_LINK_BW_1_62:
select = 0;
break;
case DP_LINK_BW_2_7:
select = 1;
break;
case DP_LINK_BW_5_4:
select = 2;
break;
case DP_LINK_BW_8_1:
select = 3;
break;
default:
DRM_DEBUG_DP("Unknown link rate\n");
select = 0;
break;
}
catalog->audio_data = select;
dp_catalog_audio_config_acr(catalog);
}
static void dp_audio_safe_to_exit_level(struct dp_audio_private *audio)
{
struct dp_catalog *catalog = audio->catalog;
u32 safe_to_exit_level = 0;
switch (audio->dp_audio.lane_count) {
case 1:
safe_to_exit_level = 14;
break;
case 2:
safe_to_exit_level = 8;
break;
case 4:
safe_to_exit_level = 5;
break;
default:
DRM_DEBUG_DP("setting the default safe_to_exit_level = %u\n",
safe_to_exit_level);
safe_to_exit_level = 14;
break;
}
catalog->audio_data = safe_to_exit_level;
dp_catalog_audio_sfe_level(catalog);
}
static void dp_audio_enable(struct dp_audio_private *audio, bool enable)
{
struct dp_catalog *catalog = audio->catalog;
catalog->audio_data = enable;
dp_catalog_audio_enable(catalog);
audio->engine_on = enable;
}
static struct dp_audio_private *dp_audio_get_data(struct platform_device *pdev)
{
struct dp_audio *dp_audio;
struct msm_dp *dp_display;
if (!pdev) {
DRM_ERROR("invalid input\n");
return ERR_PTR(-ENODEV);
}
dp_display = platform_get_drvdata(pdev);
if (!dp_display) {
DRM_ERROR("invalid input\n");
return ERR_PTR(-ENODEV);
}
dp_audio = dp_display->dp_audio;
if (!dp_audio) {
DRM_ERROR("invalid dp_audio data\n");
return ERR_PTR(-EINVAL);
}
return container_of(dp_audio, struct dp_audio_private, dp_audio);
}
static int dp_audio_hook_plugged_cb(struct device *dev, void *data,
hdmi_codec_plugged_cb fn,
struct device *codec_dev)
{
struct platform_device *pdev;
struct msm_dp *dp_display;
pdev = to_platform_device(dev);
if (!pdev) {
pr_err("invalid input\n");
return -ENODEV;
}
dp_display = platform_get_drvdata(pdev);
if (!dp_display) {
pr_err("invalid input\n");
return -ENODEV;
}
return dp_display_set_plugged_cb(dp_display, fn, codec_dev);
}
static int dp_audio_get_eld(struct device *dev,
void *data, uint8_t *buf, size_t len)
{
struct platform_device *pdev;
struct msm_dp *dp_display;
pdev = to_platform_device(dev);
if (!pdev) {
DRM_ERROR("invalid input\n");
return -ENODEV;
}
dp_display = platform_get_drvdata(pdev);
if (!dp_display) {
DRM_ERROR("invalid input\n");
return -ENODEV;
}
memcpy(buf, dp_display->connector->eld,
min(sizeof(dp_display->connector->eld), len));
return 0;
}
int dp_audio_hw_params(struct device *dev,
void *data,
struct hdmi_codec_daifmt *daifmt,
struct hdmi_codec_params *params)
{
int rc = 0;
struct dp_audio_private *audio;
struct platform_device *pdev;
struct msm_dp *dp_display;
pdev = to_platform_device(dev);
dp_display = platform_get_drvdata(pdev);
/*
* The sound card may be opened before DP is connected, or even
* while it is disconnected. This can cause unclocked register
* access, since the audio subsystem relies on the DP driver to
* maintain the correct clock state. To protect against such
* cases, check the connection status and bail out if the
* display is not powered on.
*/
if (!dp_display->power_on) {
rc = -EINVAL;
goto end;
}
audio = dp_audio_get_data(pdev);
if (IS_ERR(audio)) {
rc = PTR_ERR(audio);
goto end;
}
audio->channels = params->channels;
dp_audio_setup_sdp(audio);
dp_audio_setup_acr(audio);
dp_audio_safe_to_exit_level(audio);
dp_audio_enable(audio, true);
dp_display->audio_enabled = true;
end:
return rc;
}
static void dp_audio_shutdown(struct device *dev, void *data)
{
struct dp_audio_private *audio;
struct platform_device *pdev;
struct msm_dp *dp_display;
pdev = to_platform_device(dev);
dp_display = platform_get_drvdata(pdev);
audio = dp_audio_get_data(pdev);
if (IS_ERR(audio)) {
DRM_ERROR("failed to get audio data\n");
return;
}
/*
* If audio was not enabled, there is no need to execute the
* shutdown and we can bail out early. This also makes sure we
* don't cause an unclocked access when the audio subsystem
* calls this without DP being connected. is_connected cannot
* be used here as it is set to false earlier than this call.
*/
if (!dp_display->audio_enabled)
return;
dp_audio_enable(audio, false);
/* signal the dp display to safely shutdown clocks */
dp_display_signal_audio_complete(dp_display);
}
static const struct hdmi_codec_ops dp_audio_codec_ops = {
.hw_params = dp_audio_hw_params,
.audio_shutdown = dp_audio_shutdown,
.get_eld = dp_audio_get_eld,
.hook_plugged_cb = dp_audio_hook_plugged_cb,
};
static struct hdmi_codec_pdata codec_data = {
.ops = &dp_audio_codec_ops,
.max_i2s_channels = 8,
.i2s = 1,
};
int dp_register_audio_driver(struct device *dev,
struct dp_audio *dp_audio)
{
struct dp_audio_private *audio_priv;
audio_priv = container_of(dp_audio,
struct dp_audio_private, dp_audio);
audio_priv->audio_pdev = platform_device_register_data(dev,
HDMI_CODEC_DRV_NAME,
PLATFORM_DEVID_AUTO,
&codec_data,
sizeof(codec_data));
return PTR_ERR_OR_ZERO(audio_priv->audio_pdev);
}
struct dp_audio *dp_audio_get(struct platform_device *pdev,
struct dp_panel *panel,
struct dp_catalog *catalog)
{
int rc = 0;
struct dp_audio_private *audio;
struct dp_audio *dp_audio;
if (!pdev || !panel || !catalog) {
DRM_ERROR("invalid input\n");
rc = -EINVAL;
goto error;
}
audio = devm_kzalloc(&pdev->dev, sizeof(*audio), GFP_KERNEL);
if (!audio) {
rc = -ENOMEM;
goto error;
}
audio->pdev = pdev;
audio->panel = panel;
audio->catalog = catalog;
dp_audio = &audio->dp_audio;
dp_catalog_audio_init(catalog);
return dp_audio;
error:
return ERR_PTR(rc);
}
void dp_audio_put(struct dp_audio *dp_audio)
{
struct dp_audio_private *audio;
if (!dp_audio)
return;
audio = container_of(dp_audio, struct dp_audio_private, dp_audio);
devm_kfree(&audio->pdev->dev, audio);
}

View File

@ -0,0 +1,72 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
*/
#ifndef _DP_AUDIO_H_
#define _DP_AUDIO_H_
#include <linux/platform_device.h>
#include "dp_panel.h"
#include "dp_catalog.h"
#include <sound/hdmi-codec.h>
/**
* struct dp_audio
* @lane_count: number of lanes configured in current session
* @bw_code: link rate's bandwidth code for current session
*/
struct dp_audio {
u32 lane_count;
u32 bw_code;
};
/**
* dp_audio_get()
*
* Creates an instance of dp_audio.
*
* @pdev: caller's platform device instance.
* @panel: an instance of dp_panel module.
* @catalog: an instance of dp_catalog module.
*
* Returns the error code in case of failure, otherwise
* an instance of the newly created dp_audio module.
*/
struct dp_audio *dp_audio_get(struct platform_device *pdev,
struct dp_panel *panel,
struct dp_catalog *catalog);
/**
* dp_register_audio_driver()
*
* Registers DP device with hdmi_codec interface.
*
* @dev: DP device instance.
* @dp_audio: an instance of dp_audio module.
*
* Returns the error code in case of failure, otherwise
* zero on success.
*/
int dp_register_audio_driver(struct device *dev,
struct dp_audio *dp_audio);
/**
* dp_audio_put()
*
* Cleans the dp_audio instance.
*
* @dp_audio: an instance of dp_audio.
*/
void dp_audio_put(struct dp_audio *dp_audio);
int dp_audio_hw_params(struct device *dev,
void *data,
struct hdmi_codec_daifmt *daifmt,
struct hdmi_codec_params *params);
#endif /* _DP_AUDIO_H_ */

View File

@ -0,0 +1,535 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
*/
#include <linux/delay.h>
#include <drm/drm_print.h>
#include "dp_reg.h"
#include "dp_aux.h"
#define DP_AUX_ENUM_STR(x) #x
struct dp_aux_private {
struct device *dev;
struct dp_catalog *catalog;
struct mutex mutex;
struct completion comp;
u32 aux_error_num;
u32 retry_cnt;
bool cmd_busy;
bool native;
bool read;
bool no_send_addr;
bool no_send_stop;
u32 offset;
u32 segment;
u32 isr;
struct drm_dp_aux dp_aux;
};
static const char *dp_aux_get_error(u32 aux_error)
{
switch (aux_error) {
case DP_AUX_ERR_NONE:
return DP_AUX_ENUM_STR(DP_AUX_ERR_NONE);
case DP_AUX_ERR_ADDR:
return DP_AUX_ENUM_STR(DP_AUX_ERR_ADDR);
case DP_AUX_ERR_TOUT:
return DP_AUX_ENUM_STR(DP_AUX_ERR_TOUT);
case DP_AUX_ERR_NACK:
return DP_AUX_ENUM_STR(DP_AUX_ERR_NACK);
case DP_AUX_ERR_DEFER:
return DP_AUX_ENUM_STR(DP_AUX_ERR_DEFER);
case DP_AUX_ERR_NACK_DEFER:
return DP_AUX_ENUM_STR(DP_AUX_ERR_NACK_DEFER);
default:
return "unknown";
}
}
static u32 dp_aux_write(struct dp_aux_private *aux,
struct drm_dp_aux_msg *msg)
{
u32 data[4], reg, len;
u8 *msgdata = msg->buffer;
int const AUX_CMD_FIFO_LEN = 128;
int i = 0;
if (aux->read)
len = 4;
else
len = msg->size + 4;
/*
* cmd fifo only has depth of 144 bytes
* limit buf length to 128 bytes here
*/
if (len > AUX_CMD_FIFO_LEN) {
DRM_ERROR("buf size greater than allowed size of 128 bytes\n");
return 0;
}
/* Pack cmd and write to HW */
data[0] = (msg->address >> 16) & 0xf; /* addr[19:16] */
if (aux->read)
data[0] |= BIT(4); /* R/W */
data[1] = (msg->address >> 8) & 0xff; /* addr[15:8] */
data[2] = msg->address & 0xff; /* addr[7:0] */
data[3] = (msg->size - 1) & 0xff; /* len[7:0] */
for (i = 0; i < len; i++) {
reg = (i < 4) ? data[i] : msgdata[i - 4];
/* index = 0, write */
reg = (((reg) << DP_AUX_DATA_OFFSET)
& DP_AUX_DATA_MASK) | DP_AUX_DATA_WRITE;
if (i == 0)
reg |= DP_AUX_DATA_INDEX_WRITE;
aux->catalog->aux_data = reg;
dp_catalog_aux_write_data(aux->catalog);
}
dp_catalog_aux_clear_trans(aux->catalog, false);
dp_catalog_aux_clear_hw_interrupts(aux->catalog);
reg = 0; /* Transaction number == 1 */
if (!aux->native) { /* i2c */
reg |= DP_AUX_TRANS_CTRL_I2C;
if (aux->no_send_addr)
reg |= DP_AUX_TRANS_CTRL_NO_SEND_ADDR;
if (aux->no_send_stop)
reg |= DP_AUX_TRANS_CTRL_NO_SEND_STOP;
}
reg |= DP_AUX_TRANS_CTRL_GO;
aux->catalog->aux_data = reg;
dp_catalog_aux_write_trans(aux->catalog);
return len;
}
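To make the packing concrete, the four command bytes for a hypothetical one-byte native read at DPCD address 0x000050 would be (not part of the patch):
/*
 *         data[0] = 0x00 | BIT(4);     addr[19:16] = 0, read bit
 *         data[1] = 0x00;              addr[15:8]
 *         data[2] = 0x50;              addr[7:0]
 *         data[3] = 0x00;              msg->size - 1
 *
 * For a read only these four bytes go into the FIFO (len = 4); for
 * a write the payload bytes follow them.
 */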
static int dp_aux_cmd_fifo_tx(struct dp_aux_private *aux,
struct drm_dp_aux_msg *msg)
{
u32 ret, len, timeout;
int aux_timeout_ms = HZ/4;
reinit_completion(&aux->comp);
len = dp_aux_write(aux, msg);
if (len == 0) {
DRM_ERROR("DP AUX write failed\n");
return -EINVAL;
}
timeout = wait_for_completion_timeout(&aux->comp, aux_timeout_ms);
if (!timeout) {
DRM_ERROR("aux %s timeout\n", (aux->read ? "read" : "write"));
return -ETIMEDOUT;
}
if (aux->aux_error_num == DP_AUX_ERR_NONE) {
ret = len;
} else {
DRM_ERROR_RATELIMITED("aux err: %s\n",
dp_aux_get_error(aux->aux_error_num));
ret = -EINVAL;
}
return ret;
}
static void dp_aux_cmd_fifo_rx(struct dp_aux_private *aux,
struct drm_dp_aux_msg *msg)
{
u32 data;
u8 *dp;
u32 i, actual_i;
u32 len = msg->size;
dp_catalog_aux_clear_trans(aux->catalog, true);
data = DP_AUX_DATA_INDEX_WRITE; /* INDEX_WRITE */
data |= DP_AUX_DATA_READ; /* read */
aux->catalog->aux_data = data;
dp_catalog_aux_write_data(aux->catalog);
dp = msg->buffer;
/* discard first byte */
data = dp_catalog_aux_read_data(aux->catalog);
for (i = 0; i < len; i++) {
data = dp_catalog_aux_read_data(aux->catalog);
*dp++ = (u8)((data >> DP_AUX_DATA_OFFSET) & 0xff);
actual_i = (data >> DP_AUX_DATA_INDEX_OFFSET) & 0xFF;
if (i != actual_i)
DRM_ERROR("Index mismatch: expected %d, found %d\n",
i, actual_i);
}
}
static void dp_aux_native_handler(struct dp_aux_private *aux)
{
u32 isr = aux->isr;
if (isr & DP_INTR_AUX_I2C_DONE)
aux->aux_error_num = DP_AUX_ERR_NONE;
else if (isr & DP_INTR_WRONG_ADDR)
aux->aux_error_num = DP_AUX_ERR_ADDR;
else if (isr & DP_INTR_TIMEOUT)
aux->aux_error_num = DP_AUX_ERR_TOUT;
if (isr & DP_INTR_NACK_DEFER)
aux->aux_error_num = DP_AUX_ERR_NACK;
if (isr & DP_INTR_AUX_ERROR) {
aux->aux_error_num = DP_AUX_ERR_PHY;
dp_catalog_aux_clear_hw_interrupts(aux->catalog);
}
complete(&aux->comp);
}
static void dp_aux_i2c_handler(struct dp_aux_private *aux)
{
u32 isr = aux->isr;
if (isr & DP_INTR_AUX_I2C_DONE) {
if (isr & (DP_INTR_I2C_NACK | DP_INTR_I2C_DEFER))
aux->aux_error_num = DP_AUX_ERR_NACK;
else
aux->aux_error_num = DP_AUX_ERR_NONE;
} else {
if (isr & DP_INTR_WRONG_ADDR)
aux->aux_error_num = DP_AUX_ERR_ADDR;
else if (isr & DP_INTR_TIMEOUT)
aux->aux_error_num = DP_AUX_ERR_TOUT;
if (isr & DP_INTR_NACK_DEFER)
aux->aux_error_num = DP_AUX_ERR_NACK_DEFER;
if (isr & DP_INTR_I2C_NACK)
aux->aux_error_num = DP_AUX_ERR_NACK;
if (isr & DP_INTR_I2C_DEFER)
aux->aux_error_num = DP_AUX_ERR_DEFER;
if (isr & DP_INTR_AUX_ERROR) {
aux->aux_error_num = DP_AUX_ERR_PHY;
dp_catalog_aux_clear_hw_interrupts(aux->catalog);
}
}
complete(&aux->comp);
}
static void dp_aux_update_offset_and_segment(struct dp_aux_private *aux,
struct drm_dp_aux_msg *input_msg)
{
u32 edid_address = 0x50;
u32 segment_address = 0x30;
bool i2c_read = input_msg->request &
(DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
u8 *data;
if (aux->native || i2c_read || ((input_msg->address != edid_address) &&
(input_msg->address != segment_address)))
return;
data = input_msg->buffer;
if (input_msg->address == segment_address)
aux->segment = *data;
else
aux->offset = *data;
}
/**
* dp_aux_transfer_helper() - helper function for EDID read transactions
*
* @aux: DP AUX private structure
* @input_msg: input message from DRM upstream APIs
* @send_seg: send the segment to sink
*
* return: void
*
* This helper function is used to fix EDID reads for non-compliant
* sinks that do not handle the i2c middle-of-transaction flag correctly.
*/
static void dp_aux_transfer_helper(struct dp_aux_private *aux,
struct drm_dp_aux_msg *input_msg,
bool send_seg)
{
struct drm_dp_aux_msg helper_msg;
u32 message_size = 0x10;
u32 segment_address = 0x30;
u32 const edid_block_length = 0x80;
bool i2c_mot = input_msg->request & DP_AUX_I2C_MOT;
bool i2c_read = input_msg->request &
(DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
if (!i2c_mot || !i2c_read || (input_msg->size == 0))
return;
/*
* The segment value and EDID offset are sent by the upstream
* DRM EDID driver for each block. Avoid duplicate AUX
* transactions for these while reading the first 16 bytes of
* each block.
*/
if (!(aux->offset % edid_block_length) || !send_seg)
goto end;
aux->read = false;
aux->cmd_busy = true;
aux->no_send_addr = true;
aux->no_send_stop = true;
/*
* Send the segment address for every i2c read in which the
* middle-of-transaction flag is set. This is required to support EDID
* reads of more than 2 blocks as the segment address is reset to 0
* since we are overriding the middle-of-transaction flag for read
* transactions.
*/
if (aux->segment) {
memset(&helper_msg, 0, sizeof(helper_msg));
helper_msg.address = segment_address;
helper_msg.buffer = &aux->segment;
helper_msg.size = 1;
dp_aux_cmd_fifo_tx(aux, &helper_msg);
}
/*
* Send the offset address for every i2c read in which the
* middle-of-transaction flag is set. This will ensure that the sink
* will update its read pointer and return the correct portion of the
* EDID buffer in the subsequent i2c read transaction triggered in the
* native AUX transfer function.
*/
memset(&helper_msg, 0, sizeof(helper_msg));
helper_msg.address = input_msg->address;
helper_msg.buffer = &aux->offset;
helper_msg.size = 1;
dp_aux_cmd_fifo_tx(aux, &helper_msg);
end:
aux->offset += message_size;
if (aux->offset == 0x80 || aux->offset == 0x100)
aux->segment = 0x0; /* reset segment at end of block */
}
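An illustrative walk-through (values assumed, not from the patch): the upstream EDID code reads in 16-byte chunks with messages along the lines of
/*
 *         struct drm_dp_aux_msg msg = {
 *                 .request = DP_AUX_I2C_READ | DP_AUX_I2C_MOT,
 *                 .address = 0x50,    EDID slave address
 *                 .buffer  = buf,
 *                 .size    = 16,
 *         };
 *
 * The first chunk of each 128-byte block (offset 0x00, 0x80, ...) is
 * block-aligned, so the helper skips straight to bumping the offset.
 * For every later chunk it replays the segment write (if nonzero)
 * and the offset write, keeping a non-compliant sink's read pointer
 * in sync before the real read is issued.
 */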
/*
* This function does the real job to process an AUX transaction.
* It calls the aux_reset() function to reset the AUX channel
* if the wait times out.
*/
static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
struct drm_dp_aux_msg *msg)
{
ssize_t ret;
int const aux_cmd_native_max = 16;
int const aux_cmd_i2c_max = 128;
int const retry_count = 5;
struct dp_aux_private *aux = container_of(dp_aux,
struct dp_aux_private, dp_aux);
mutex_lock(&aux->mutex);
aux->native = msg->request & (DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ);
/* Ignore address-only messages */
if ((msg->size == 0) || (msg->buffer == NULL)) {
msg->reply = aux->native ?
DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK;
ret = msg->size;
goto unlock_exit;
}
/* msg sanity check */
if ((aux->native && (msg->size > aux_cmd_native_max)) ||
(msg->size > aux_cmd_i2c_max)) {
DRM_ERROR("%s: invalid msg: size(%zu), request(%x)\n",
__func__, msg->size, msg->request);
ret = -EINVAL;
goto unlock_exit;
}
dp_aux_update_offset_and_segment(aux, msg);
dp_aux_transfer_helper(aux, msg, true);
aux->read = msg->request & (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
aux->cmd_busy = true;
if (aux->read) {
aux->no_send_addr = true;
aux->no_send_stop = false;
} else {
aux->no_send_addr = true;
aux->no_send_stop = true;
}
ret = dp_aux_cmd_fifo_tx(aux, msg);
if (ret < 0) {
if (aux->native) {
aux->retry_cnt++;
if (!(aux->retry_cnt % retry_count))
dp_catalog_aux_update_cfg(aux->catalog);
dp_catalog_aux_reset(aux->catalog);
}
usleep_range(400, 500); /* at least 400us to next try */
goto unlock_exit;
}
if (aux->aux_error_num == DP_AUX_ERR_NONE) {
if (aux->read)
dp_aux_cmd_fifo_rx(aux, msg);
msg->reply = aux->native ?
DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK;
} else {
/* Reply defer to retry */
msg->reply = aux->native ?
DP_AUX_NATIVE_REPLY_DEFER : DP_AUX_I2C_REPLY_DEFER;
}
/* Return requested size for success or retry */
ret = msg->size;
aux->retry_cnt = 0;
unlock_exit:
aux->cmd_busy = false;
mutex_unlock(&aux->mutex);
return ret;
}
void dp_aux_isr(struct drm_dp_aux *dp_aux)
{
struct dp_aux_private *aux;
if (!dp_aux) {
DRM_ERROR("invalid input\n");
return;
}
aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
aux->isr = dp_catalog_aux_get_irq(aux->catalog);
if (!aux->cmd_busy)
return;
if (aux->native)
dp_aux_native_handler(aux);
else
dp_aux_i2c_handler(aux);
}
void dp_aux_reconfig(struct drm_dp_aux *dp_aux)
{
struct dp_aux_private *aux;
aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
dp_catalog_aux_update_cfg(aux->catalog);
dp_catalog_aux_reset(aux->catalog);
}
void dp_aux_init(struct drm_dp_aux *dp_aux)
{
struct dp_aux_private *aux;
if (!dp_aux) {
DRM_ERROR("invalid input\n");
return;
}
aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
dp_catalog_aux_enable(aux->catalog, true);
aux->retry_cnt = 0;
}
void dp_aux_deinit(struct drm_dp_aux *dp_aux)
{
struct dp_aux_private *aux;
aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
dp_catalog_aux_enable(aux->catalog, false);
}
int dp_aux_register(struct drm_dp_aux *dp_aux)
{
struct dp_aux_private *aux;
int ret;
if (!dp_aux) {
DRM_ERROR("invalid input\n");
return -EINVAL;
}
aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
aux->dp_aux.name = "dpu_dp_aux";
aux->dp_aux.dev = aux->dev;
aux->dp_aux.transfer = dp_aux_transfer;
ret = drm_dp_aux_register(&aux->dp_aux);
if (ret) {
DRM_ERROR("%s: failed to register drm aux: %d\n", __func__,
ret);
return ret;
}
return 0;
}
void dp_aux_unregister(struct drm_dp_aux *dp_aux)
{
drm_dp_aux_unregister(dp_aux);
}
struct drm_dp_aux *dp_aux_get(struct device *dev, struct dp_catalog *catalog)
{
struct dp_aux_private *aux;
if (!catalog) {
DRM_ERROR("invalid input\n");
return ERR_PTR(-ENODEV);
}
aux = devm_kzalloc(dev, sizeof(*aux), GFP_KERNEL);
if (!aux)
return ERR_PTR(-ENOMEM);
init_completion(&aux->comp);
aux->cmd_busy = false;
mutex_init(&aux->mutex);
aux->dev = dev;
aux->catalog = catalog;
aux->retry_cnt = 0;
return &aux->dp_aux;
}
void dp_aux_put(struct drm_dp_aux *dp_aux)
{
struct dp_aux_private *aux;
if (!dp_aux)
return;
aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
mutex_destroy(&aux->mutex);
devm_kfree(aux->dev, aux);
}
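For orientation, a minimal sketch of how a caller exercises the transfer hook installed by dp_aux_register(); the function name here is hypothetical, and real callers go through drm_dp_dpcd_read(), which adds retries on top:

/* Illustrative only: one native DPCD read through the registered hook. */
static int example_read_sink_count(struct drm_dp_aux *dp_aux)
{
	u8 count;
	struct drm_dp_aux_msg msg = {
		.request = DP_AUX_NATIVE_READ,
		.address = DP_SINK_COUNT,
		.buffer  = &count,
		.size    = 1,
	};
	ssize_t ret = dp_aux->transfer(dp_aux, &msg);	/* dp_aux_transfer() */

	if (ret < 0)
		return ret;
	return DP_GET_SINK_COUNT(count);
}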

View File

@ -0,0 +1,30 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
*/
#ifndef _DP_AUX_H_
#define _DP_AUX_H_
#include "dp_catalog.h"
#include <drm/drm_dp_helper.h>
#define DP_AUX_ERR_NONE 0
#define DP_AUX_ERR_ADDR -1
#define DP_AUX_ERR_TOUT -2
#define DP_AUX_ERR_NACK -3
#define DP_AUX_ERR_DEFER -4
#define DP_AUX_ERR_NACK_DEFER -5
#define DP_AUX_ERR_PHY -6
int dp_aux_register(struct drm_dp_aux *dp_aux);
void dp_aux_unregister(struct drm_dp_aux *dp_aux);
void dp_aux_isr(struct drm_dp_aux *dp_aux);
void dp_aux_init(struct drm_dp_aux *dp_aux);
void dp_aux_deinit(struct drm_dp_aux *dp_aux);
void dp_aux_reconfig(struct drm_dp_aux *dp_aux);
struct drm_dp_aux *dp_aux_get(struct device *dev, struct dp_catalog *catalog);
void dp_aux_put(struct drm_dp_aux *aux);
#endif /* _DP_AUX_H_ */

File diff suppressed because it is too large

View File

@ -0,0 +1,131 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
*/
#ifndef _DP_CATALOG_H_
#define _DP_CATALOG_H_
#include <drm/drm_modes.h>
#include "dp_parser.h"
/* interrupts */
#define DP_INTR_HPD BIT(0)
#define DP_INTR_AUX_I2C_DONE BIT(3)
#define DP_INTR_WRONG_ADDR BIT(6)
#define DP_INTR_TIMEOUT BIT(9)
#define DP_INTR_NACK_DEFER BIT(12)
#define DP_INTR_WRONG_DATA_CNT BIT(15)
#define DP_INTR_I2C_NACK BIT(18)
#define DP_INTR_I2C_DEFER BIT(21)
#define DP_INTR_PLL_UNLOCKED BIT(24)
#define DP_INTR_AUX_ERROR BIT(27)
#define DP_INTR_READY_FOR_VIDEO BIT(0)
#define DP_INTR_IDLE_PATTERN_SENT BIT(3)
#define DP_INTR_FRAME_END BIT(6)
#define DP_INTR_CRC_UPDATED BIT(9)
#define DP_AUX_CFG_MAX_VALUE_CNT 3
/* PHY AUX config registers */
enum dp_phy_aux_config_type {
PHY_AUX_CFG0,
PHY_AUX_CFG1,
PHY_AUX_CFG2,
PHY_AUX_CFG3,
PHY_AUX_CFG4,
PHY_AUX_CFG5,
PHY_AUX_CFG6,
PHY_AUX_CFG7,
PHY_AUX_CFG8,
PHY_AUX_CFG9,
PHY_AUX_CFG_MAX,
};
enum dp_catalog_audio_sdp_type {
DP_AUDIO_SDP_STREAM,
DP_AUDIO_SDP_TIMESTAMP,
DP_AUDIO_SDP_INFOFRAME,
DP_AUDIO_SDP_COPYMANAGEMENT,
DP_AUDIO_SDP_ISRC,
DP_AUDIO_SDP_MAX,
};
enum dp_catalog_audio_header_type {
DP_AUDIO_SDP_HEADER_1,
DP_AUDIO_SDP_HEADER_2,
DP_AUDIO_SDP_HEADER_3,
DP_AUDIO_SDP_HEADER_MAX,
};
struct dp_catalog {
u32 aux_data;
u32 total;
u32 sync_start;
u32 width_blanking;
u32 dp_active;
enum dp_catalog_audio_sdp_type sdp_type;
enum dp_catalog_audio_header_type sdp_header;
u32 audio_data;
};
/* AUX APIs */
u32 dp_catalog_aux_read_data(struct dp_catalog *dp_catalog);
int dp_catalog_aux_write_data(struct dp_catalog *dp_catalog);
int dp_catalog_aux_write_trans(struct dp_catalog *dp_catalog);
int dp_catalog_aux_clear_trans(struct dp_catalog *dp_catalog, bool read);
int dp_catalog_aux_clear_hw_interrupts(struct dp_catalog *dp_catalog);
void dp_catalog_aux_reset(struct dp_catalog *dp_catalog);
void dp_catalog_aux_enable(struct dp_catalog *dp_catalog, bool enable);
void dp_catalog_aux_update_cfg(struct dp_catalog *dp_catalog);
int dp_catalog_aux_get_irq(struct dp_catalog *dp_catalog);
/* DP Controller APIs */
void dp_catalog_ctrl_state_ctrl(struct dp_catalog *dp_catalog, u32 state);
void dp_catalog_ctrl_config_ctrl(struct dp_catalog *dp_catalog, u32 config);
void dp_catalog_ctrl_lane_mapping(struct dp_catalog *dp_catalog);
void dp_catalog_ctrl_mainlink_ctrl(struct dp_catalog *dp_catalog, bool enable);
void dp_catalog_ctrl_config_misc(struct dp_catalog *dp_catalog, u32 cc, u32 tb);
void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog, u32 rate,
u32 stream_rate_khz, bool fixed_nvid);
int dp_catalog_ctrl_set_pattern(struct dp_catalog *dp_catalog, u32 pattern);
void dp_catalog_ctrl_reset(struct dp_catalog *dp_catalog);
bool dp_catalog_ctrl_mainlink_ready(struct dp_catalog *dp_catalog);
void dp_catalog_ctrl_enable_irq(struct dp_catalog *dp_catalog, bool enable);
void dp_catalog_hpd_config_intr(struct dp_catalog *dp_catalog,
u32 intr_mask, bool en);
void dp_catalog_ctrl_hpd_config(struct dp_catalog *dp_catalog);
u32 dp_catalog_hpd_get_intr_status(struct dp_catalog *dp_catalog);
void dp_catalog_ctrl_phy_reset(struct dp_catalog *dp_catalog);
int dp_catalog_ctrl_update_vx_px(struct dp_catalog *dp_catalog, u8 v_level,
u8 p_level);
int dp_catalog_ctrl_get_interrupt(struct dp_catalog *dp_catalog);
void dp_catalog_ctrl_update_transfer_unit(struct dp_catalog *dp_catalog,
u32 dp_tu, u32 valid_boundary,
u32 valid_boundary2);
void dp_catalog_ctrl_send_phy_pattern(struct dp_catalog *dp_catalog,
u32 pattern);
u32 dp_catalog_ctrl_read_phy_pattern(struct dp_catalog *dp_catalog);
/* DP Panel APIs */
int dp_catalog_panel_timing_cfg(struct dp_catalog *dp_catalog);
void dp_catalog_dump_regs(struct dp_catalog *dp_catalog);
void dp_catalog_panel_tpg_enable(struct dp_catalog *dp_catalog,
struct drm_display_mode *drm_mode);
void dp_catalog_panel_tpg_disable(struct dp_catalog *dp_catalog);
struct dp_catalog *dp_catalog_get(struct device *dev, struct dp_io *io);
/* DP Audio APIs */
void dp_catalog_audio_get_header(struct dp_catalog *catalog);
void dp_catalog_audio_set_header(struct dp_catalog *catalog);
void dp_catalog_audio_config_acr(struct dp_catalog *catalog);
void dp_catalog_audio_enable(struct dp_catalog *catalog);
void dp_catalog_audio_config_sdp(struct dp_catalog *catalog);
void dp_catalog_audio_init(struct dp_catalog *catalog);
void dp_catalog_audio_sfe_level(struct dp_catalog *catalog);
#endif /* _DP_CATALOG_H_ */
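Note the 3-bit stride of the DP_INTR_* definitions: each interrupt is assumed to carry companion ack and mask bits at the next two positions. A minimal sketch under that assumption (the EXAMPLE_* names are illustrative, not part of this patch):

/* Illustrative only: derive ack/mask words from a set of status bits,
 * assuming <status, ack, mask> are packed at consecutive bit positions.
 */
#define EXAMPLE_INTR_ACK_SHIFT	1
#define EXAMPLE_INTR_MASK_SHIFT	2

static inline u32 example_intr_ack(u32 status)
{
	return status << EXAMPLE_INTR_ACK_SHIFT;
}

static inline u32 example_intr_enable(u32 status)
{
	return status << EXAMPLE_INTR_MASK_SHIFT;
}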

File diff suppressed because it is too large

View File

@ -0,0 +1,36 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
*/
#ifndef _DP_CTRL_H_
#define _DP_CTRL_H_
#include "dp_aux.h"
#include "dp_panel.h"
#include "dp_link.h"
#include "dp_parser.h"
#include "dp_power.h"
#include "dp_catalog.h"
struct dp_ctrl {
bool orientation;
atomic_t aborted;
u32 pixel_rate;
};
int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip);
void dp_ctrl_host_deinit(struct dp_ctrl *dp_ctrl);
int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl);
int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl);
int dp_ctrl_off(struct dp_ctrl *dp_ctrl);
void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl);
void dp_ctrl_isr(struct dp_ctrl *dp_ctrl);
void dp_ctrl_handle_sink_request(struct dp_ctrl *dp_ctrl);
struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link,
struct dp_panel *panel, struct drm_dp_aux *aux,
struct dp_power *power, struct dp_catalog *catalog,
struct dp_parser *parser);
void dp_ctrl_put(struct dp_ctrl *dp_ctrl);
#endif /* _DP_CTRL_H_ */

View File

@ -0,0 +1,485 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt)"[drm-dp] %s: " fmt, __func__
#include <linux/debugfs.h>
#include <drm/drm_connector.h>
#include <drm/drm_file.h>
#include "dp_parser.h"
#include "dp_catalog.h"
#include "dp_aux.h"
#include "dp_ctrl.h"
#include "dp_debug.h"
#include "dp_display.h"
#define DEBUG_NAME "msm_dp"
struct dp_debug_private {
struct dentry *root;
struct dp_usbpd *usbpd;
struct dp_link *link;
struct dp_panel *panel;
struct drm_connector **connector;
struct device *dev;
struct drm_device *drm_dev;
struct dp_debug dp_debug;
};
static int dp_debug_check_buffer_overflow(int rc, int *max_size, int *len)
{
if (rc >= *max_size) {
DRM_ERROR("buffer overflow\n");
return -EINVAL;
}
*len += rc;
*max_size = SZ_4K - *len;
return 0;
}
static ssize_t dp_debug_read_info(struct file *file, char __user *user_buff,
size_t count, loff_t *ppos)
{
struct dp_debug_private *debug = file->private_data;
char *buf;
u32 len = 0, rc = 0;
u64 lclk = 0;
u32 max_size = SZ_4K;
u32 link_params_rate;
struct drm_display_mode *drm_mode;
if (!debug)
return -ENODEV;
if (*ppos)
return 0;
buf = kzalloc(SZ_4K, GFP_KERNEL);
if (!buf)
return -ENOMEM;
drm_mode = &debug->panel->dp_mode.drm_mode;
rc = snprintf(buf + len, max_size, "\tname = %s\n", DEBUG_NAME);
if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
goto error;
rc = snprintf(buf + len, max_size,
"\tdp_panel\n\t\tmax_pclk_khz = %d\n",
debug->panel->max_pclk_khz);
if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
goto error;
rc = snprintf(buf + len, max_size,
"\tdrm_dp_link\n\t\trate = %u\n",
debug->panel->link_info.rate);
if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
goto error;
rc = snprintf(buf + len, max_size,
"\t\tnum_lanes = %u\n",
debug->panel->link_info.num_lanes);
if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
goto error;
rc = snprintf(buf + len, max_size,
"\t\tcapabilities = %lu\n",
debug->panel->link_info.capabilities);
if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
goto error;
rc = snprintf(buf + len, max_size,
"\tdp_panel_info:\n\t\tactive = %dx%d\n",
drm_mode->hdisplay,
drm_mode->vdisplay);
if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
goto error;
rc = snprintf(buf + len, max_size,
"\t\tback_porch = %dx%d\n",
drm_mode->htotal - drm_mode->hsync_end,
drm_mode->vtotal - drm_mode->vsync_end);
if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
goto error;
rc = snprintf(buf + len, max_size,
"\t\tfront_porch = %dx%d\n",
drm_mode->hsync_start - drm_mode->hdisplay,
drm_mode->vsync_start - drm_mode->vdisplay);
if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
goto error;
rc = snprintf(buf + len, max_size,
"\t\tsync_width = %dx%d\n",
drm_mode->hsync_end - drm_mode->hsync_start,
drm_mode->vsync_end - drm_mode->vsync_start);
if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
goto error;
rc = snprintf(buf + len, max_size,
"\t\tactive_low = %dx%d\n",
debug->panel->dp_mode.h_active_low,
debug->panel->dp_mode.v_active_low);
if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
goto error;
rc = snprintf(buf + len, max_size,
"\t\th_skew = %d\n",
drm_mode->hskew);
if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
goto error;
rc = snprintf(buf + len, max_size,
"\t\trefresh rate = %d\n",
drm_mode_vrefresh(drm_mode));
if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
goto error;
rc = snprintf(buf + len, max_size,
"\t\tpixel clock khz = %d\n",
drm_mode->clock);
if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
goto error;
rc = snprintf(buf + len, max_size,
"\t\tbpp = %d\n",
debug->panel->dp_mode.bpp);
if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
goto error;
/* Link Information */
rc = snprintf(buf + len, max_size,
"\tdp_link:\n\t\ttest_requested = %d\n",
debug->link->sink_request);
if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
goto error;
rc = snprintf(buf + len, max_size,
"\t\tnum_lanes = %d\n",
debug->link->link_params.num_lanes);
if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
goto error;
link_params_rate = debug->link->link_params.rate;
rc = snprintf(buf + len, max_size,
"\t\tbw_code = %d\n",
drm_dp_link_rate_to_bw_code(link_params_rate));
if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
goto error;
lclk = debug->link->link_params.rate * 1000;
rc = snprintf(buf + len, max_size,
"\t\tlclk = %lld\n", lclk);
if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
goto error;
rc = snprintf(buf + len, max_size,
"\t\tv_level = %d\n",
debug->link->phy_params.v_level);
if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
goto error;
rc = snprintf(buf + len, max_size,
"\t\tp_level = %d\n",
debug->link->phy_params.p_level);
if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
goto error;
if (copy_to_user(user_buff, buf, len))
goto error;
*ppos += len;
kfree(buf);
return len;
error:
kfree(buf);
return -EINVAL;
}
static int dp_test_data_show(struct seq_file *m, void *data)
{
struct drm_device *dev;
struct dp_debug_private *debug;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
u32 bpc;
debug = m->private;
dev = debug->drm_dev;
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
if (connector->connector_type !=
DRM_MODE_CONNECTOR_DisplayPort)
continue;
if (connector->status == connector_status_connected) {
bpc = debug->link->test_video.test_bit_depth;
seq_printf(m, "hdisplay: %d\n",
debug->link->test_video.test_h_width);
seq_printf(m, "vdisplay: %d\n",
debug->link->test_video.test_v_height);
seq_printf(m, "bpc: %u\n",
dp_link_bit_depth_to_bpc(bpc));
} else
seq_puts(m, "0");
}
drm_connector_list_iter_end(&conn_iter);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(dp_test_data);
static int dp_test_type_show(struct seq_file *m, void *data)
{
struct dp_debug_private *debug = m->private;
struct drm_device *dev = debug->drm_dev;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
if (connector->connector_type !=
DRM_MODE_CONNECTOR_DisplayPort)
continue;
if (connector->status == connector_status_connected)
seq_printf(m, "%02x", DP_TEST_LINK_VIDEO_PATTERN);
else
seq_puts(m, "0");
}
drm_connector_list_iter_end(&conn_iter);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(dp_test_type);
static ssize_t dp_test_active_write(struct file *file,
const char __user *ubuf,
size_t len, loff_t *offp)
{
char *input_buffer;
int status = 0;
struct dp_debug_private *debug;
struct drm_device *dev;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
int val = 0;
debug = ((struct seq_file *)file->private_data)->private;
dev = debug->drm_dev;
if (len == 0)
return 0;
input_buffer = memdup_user_nul(ubuf, len);
if (IS_ERR(input_buffer))
return PTR_ERR(input_buffer);
DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
if (connector->connector_type !=
DRM_MODE_CONNECTOR_DisplayPort)
continue;
if (connector->status == connector_status_connected) {
status = kstrtoint(input_buffer, 10, &val);
if (status < 0)
break;
DRM_DEBUG_DRIVER("Got %d for test active\n", val);
/* To prevent erroneous activation of the compliance
* testing code, only accept an actual value of 1 here
*/
if (val == 1)
debug->panel->video_test = true;
else
debug->panel->video_test = false;
}
}
drm_connector_list_iter_end(&conn_iter);
kfree(input_buffer);
if (status < 0)
return status;
*offp += len;
return len;
}
static int dp_test_active_show(struct seq_file *m, void *data)
{
struct dp_debug_private *debug = m->private;
struct drm_device *dev = debug->drm_dev;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
if (connector->connector_type !=
DRM_MODE_CONNECTOR_DisplayPort)
continue;
if (connector->status == connector_status_connected) {
if (debug->panel->video_test)
seq_puts(m, "1");
else
seq_puts(m, "0");
} else
seq_puts(m, "0");
}
drm_connector_list_iter_end(&conn_iter);
return 0;
}
static int dp_test_active_open(struct inode *inode,
struct file *file)
{
return single_open(file, dp_test_active_show,
inode->i_private);
}
static const struct file_operations dp_debug_fops = {
.open = simple_open,
.read = dp_debug_read_info,
};
static const struct file_operations test_active_fops = {
.owner = THIS_MODULE,
.open = dp_test_active_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
.write = dp_test_active_write
};
static int dp_debug_init(struct dp_debug *dp_debug, struct drm_minor *minor)
{
int rc = 0;
struct dp_debug_private *debug = container_of(dp_debug,
struct dp_debug_private, dp_debug);
struct dentry *file;
struct dentry *test_active;
struct dentry *test_data, *test_type;
file = debugfs_create_file("dp_debug", 0444, minor->debugfs_root,
debug, &dp_debug_fops);
if (IS_ERR_OR_NULL(file)) {
rc = PTR_ERR(file);
DRM_ERROR("[%s] debugfs create file failed, rc=%d\n",
DEBUG_NAME, rc);
}
test_active = debugfs_create_file("msm_dp_test_active", 0444,
minor->debugfs_root,
debug, &test_active_fops);
if (IS_ERR_OR_NULL(test_active)) {
rc = PTR_ERR(test_active);
DRM_ERROR("[%s] debugfs test_active failed, rc=%d\n",
DEBUG_NAME, rc);
}
test_data = debugfs_create_file("msm_dp_test_data", 0444,
minor->debugfs_root,
debug, &dp_test_data_fops);
if (IS_ERR_OR_NULL(test_data)) {
rc = PTR_ERR(test_data);
DRM_ERROR("[%s] debugfs test_data failed, rc=%d\n",
DEBUG_NAME, rc);
}
test_type = debugfs_create_file("msm_dp_test_type", 0444,
minor->debugfs_root,
debug, &dp_test_type_fops);
if (IS_ERR_OR_NULL(test_type)) {
rc = PTR_ERR(test_type);
DRM_ERROR("[%s] debugfs test_type failed, rc=%d\n",
DEBUG_NAME, rc);
}
debug->root = minor->debugfs_root;
return rc;
}
struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel,
struct dp_usbpd *usbpd, struct dp_link *link,
struct drm_connector **connector, struct drm_minor *minor)
{
int rc = 0;
struct dp_debug_private *debug;
struct dp_debug *dp_debug;
if (!dev || !panel || !usbpd || !link) {
DRM_ERROR("invalid input\n");
rc = -EINVAL;
goto error;
}
debug = devm_kzalloc(dev, sizeof(*debug), GFP_KERNEL);
if (!debug) {
rc = -ENOMEM;
goto error;
}
debug->dp_debug.debug_en = false;
debug->usbpd = usbpd;
debug->link = link;
debug->panel = panel;
debug->dev = dev;
debug->drm_dev = minor->dev;
debug->connector = connector;
dp_debug = &debug->dp_debug;
dp_debug->vdisplay = 0;
dp_debug->hdisplay = 0;
dp_debug->vrefresh = 0;
rc = dp_debug_init(dp_debug, minor);
if (rc) {
devm_kfree(dev, debug);
goto error;
}
return dp_debug;
error:
return ERR_PTR(rc);
}
static int dp_debug_deinit(struct dp_debug *dp_debug)
{
struct dp_debug_private *debug;
if (!dp_debug)
return -EINVAL;
debug = container_of(dp_debug, struct dp_debug_private, dp_debug);
debugfs_remove_recursive(debug->root);
return 0;
}
void dp_debug_put(struct dp_debug *dp_debug)
{
struct dp_debug_private *debug;
if (!dp_debug)
return;
debug = container_of(dp_debug, struct dp_debug_private, dp_debug);
dp_debug_deinit(dp_debug);
devm_kfree(debug->dev, debug);
}

View File

@ -0,0 +1,74 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
*/
#ifndef _DP_DEBUG_H_
#define _DP_DEBUG_H_
#include "dp_panel.h"
#include "dp_link.h"
/**
* struct dp_debug
* @debug_en: specifies whether debug mode enabled
* @vdisplay: used to filter out vdisplay value
* @hdisplay: used to filter out hdisplay value
* @vrefresh: used to filter out vrefresh value
* @aspect_ratio: used to filter out aspect_ratio value
*/
struct dp_debug {
bool debug_en;
int aspect_ratio;
int vdisplay;
int hdisplay;
int vrefresh;
};
#if defined(CONFIG_DEBUG_FS)
/**
* dp_debug_get() - configure and get the DisplayPort debug module data
*
* @dev: device instance of the caller
* @panel: instance of panel module
* @usbpd: instance of usbpd module
* @link: instance of link module
* @connector: double pointer to display connector
* @minor: pointer to drm minor number after device registration
* return: pointer to allocated debug module data
*
* This function sets up the debug module and provides a way
* for debugfs input to be communicated with existing modules
*/
struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel,
struct dp_usbpd *usbpd, struct dp_link *link,
struct drm_connector **connector,
struct drm_minor *minor);
/**
* dp_debug_put() - clean up the dp_debug instance
*
* @dp_debug: instance of dp_debug
*/
void dp_debug_put(struct dp_debug *dp_debug);
#else
static inline
struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel,
struct dp_usbpd *usbpd, struct dp_link *link,
struct drm_connector **connector, struct drm_minor *minor)
{
return ERR_PTR(-EINVAL);
}
static inline void dp_debug_put(struct dp_debug *dp_debug)
{
}
#endif /* defined(CONFIG_DEBUG_FS) */
#endif /* _DP_DEBUG_H_ */

File diff suppressed because it is too large

View File

@ -0,0 +1,39 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
*/
#ifndef _DP_DISPLAY_H_
#define _DP_DISPLAY_H_
#include "dp_panel.h"
#include <sound/hdmi-codec.h>
struct msm_dp {
struct drm_device *drm_dev;
struct device *codec_dev;
struct drm_connector *connector;
struct drm_encoder *encoder;
bool is_connected;
bool audio_enabled;
bool power_on;
hdmi_codec_plugged_cb plugged_cb;
u32 max_pclk_khz;
u32 max_dp_lanes;
struct dp_audio *dp_audio;
};
int dp_display_set_plugged_cb(struct msm_dp *dp_display,
hdmi_codec_plugged_cb fn, struct device *codec_dev);
int dp_display_validate_mode(struct msm_dp *dp_display, u32 mode_pclk_khz);
int dp_display_get_modes(struct msm_dp *dp_display,
struct dp_display_mode *dp_mode);
int dp_display_request_irq(struct msm_dp *dp_display);
bool dp_display_check_video_test(struct msm_dp *dp_display);
int dp_display_get_test_bpp(struct msm_dp *dp_display);
void dp_display_signal_audio_complete(struct msm_dp *dp_display);
#endif /* _DP_DISPLAY_H_ */
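For context, a sketch of how an audio glue layer might hook plug notifications through dp_display_set_plugged_cb(); the callback typedef comes from <sound/hdmi-codec.h> and the example_* names are hypothetical:

/* Illustrative only: report DP hotplug state to the audio side. */
static void example_plugged_cb(struct device *codec_dev, bool plugged)
{
	dev_dbg(codec_dev, "DP audio %splugged\n", plugged ? "" : "un");
}

static int example_codec_bind(struct msm_dp *dp, struct device *codec_dev)
{
	return dp_display_set_plugged_cb(dp, example_plugged_cb, codec_dev);
}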

View File

@ -0,0 +1,164 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
*/
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include "msm_drv.h"
#include "msm_kms.h"
#include "dp_drm.h"
struct dp_connector {
struct drm_connector base;
struct msm_dp *dp_display;
};
#define to_dp_connector(x) container_of(x, struct dp_connector, base)
/**
* dp_connector_detect - callback to determine if connector is connected
* @conn: Pointer to drm connector structure
* @force: Force detect setting from drm framework
* Returns: Connector 'is connected' status
*/
static enum drm_connector_status dp_connector_detect(struct drm_connector *conn,
bool force)
{
struct msm_dp *dp;
dp = to_dp_connector(conn)->dp_display;
DRM_DEBUG_DP("is_connected = %s\n",
(dp->is_connected) ? "true" : "false");
return (dp->is_connected) ? connector_status_connected :
connector_status_disconnected;
}
/**
* dp_connector_get_modes - callback to add drm modes via drm_mode_probed_add()
* @connector: Pointer to drm connector structure
* Returns: Number of modes added
*/
static int dp_connector_get_modes(struct drm_connector *connector)
{
int rc = 0;
struct msm_dp *dp;
struct dp_display_mode *dp_mode = NULL;
struct drm_display_mode *m, drm_mode;
if (!connector)
return 0;
dp = to_dp_connector(connector)->dp_display;
dp_mode = kzalloc(sizeof(*dp_mode), GFP_KERNEL);
if (!dp_mode)
return 0;
/* the pluggable case assumes the EDID is read when HPD goes high */
if (dp->is_connected) {
/*
* The get_modes() function might return one mode that is stored
* in dp_mode when a compliance test is in progress. If not, the
* return value is equal to the total number of modes supported
* by the sink.
*/
rc = dp_display_get_modes(dp, dp_mode);
if (rc <= 0) {
DRM_ERROR("failed to get DP sink modes, rc=%d\n", rc);
kfree(dp_mode);
return rc;
}
if (dp_mode->drm_mode.clock) { /* valid DP mode */
memset(&drm_mode, 0x0, sizeof(drm_mode));
drm_mode_copy(&drm_mode, &dp_mode->drm_mode);
m = drm_mode_duplicate(connector->dev, &drm_mode);
if (!m) {
DRM_ERROR("failed to add mode %ux%u\n",
drm_mode.hdisplay,
drm_mode.vdisplay);
kfree(dp_mode);
return 0;
}
drm_mode_probed_add(connector, m);
}
} else {
DRM_DEBUG_DP("No sink connected\n");
}
kfree(dp_mode);
return rc;
}
/**
* dp_connector_mode_valid - callback to determine if specified mode is valid
* @connector: Pointer to drm connector structure
* @mode: Pointer to drm mode structure
* Returns: Validity status for specified mode
*/
static enum drm_mode_status dp_connector_mode_valid(
struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct msm_dp *dp_disp;
dp_disp = to_dp_connector(connector)->dp_display;
if ((dp_disp->max_pclk_khz <= 0) ||
(dp_disp->max_pclk_khz > DP_MAX_PIXEL_CLK_KHZ) ||
(mode->clock > dp_disp->max_pclk_khz))
return MODE_BAD;
return dp_display_validate_mode(dp_disp, mode->clock);
}
static const struct drm_connector_funcs dp_connector_funcs = {
.detect = dp_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_connector_helper_funcs dp_connector_helper_funcs = {
.get_modes = dp_connector_get_modes,
.mode_valid = dp_connector_mode_valid,
};
/* connector initialization */
struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display)
{
struct drm_connector *connector = NULL;
struct dp_connector *dp_connector;
int ret;
dp_connector = devm_kzalloc(dp_display->drm_dev->dev,
sizeof(*dp_connector),
GFP_KERNEL);
if (!dp_connector)
return ERR_PTR(-ENOMEM);
dp_connector->dp_display = dp_display;
connector = &dp_connector->base;
ret = drm_connector_init(dp_display->drm_dev, connector,
&dp_connector_funcs,
DRM_MODE_CONNECTOR_DisplayPort);
if (ret)
return ERR_PTR(ret);
drm_connector_helper_add(connector, &dp_connector_helper_funcs);
/*
* Enable HPD so that hpd events are handled when the cable is connected.
*/
connector->polled = DRM_CONNECTOR_POLL_HPD;
drm_connector_attach_encoder(connector, dp_display->encoder);
return connector;
}
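A sketch of the expected caller, assuming modeset-init code that already has the encoder attached to dp_display (error handling trimmed):

/* Illustrative only: create and cache the DP connector at modeset init. */
struct drm_connector *connector = dp_drm_connector_init(dp_display);

if (IS_ERR(connector))
	return PTR_ERR(connector);
dp_display->connector = connector;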

View File

@ -0,0 +1,18 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
*/
#ifndef _DP_DRM_H_
#define _DP_DRM_H_
#include <linux/types.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include "msm_drv.h"
#include "dp_display.h"
struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display);
#endif /* _DP_DRM_H_ */

View File

@ -0,0 +1,69 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__
#include <linux/slab.h>
#include <linux/device.h>
#include "dp_hpd.h"
/* DP specific VDM commands */
#define DP_USBPD_VDM_STATUS 0x10
#define DP_USBPD_VDM_CONFIGURE 0x11
/* USBPD-TypeC specific Macros */
#define VDM_VERSION 0x0
#define USB_C_DP_SID 0xFF01
struct dp_hpd_private {
struct device *dev;
struct dp_usbpd_cb *dp_cb;
struct dp_usbpd dp_usbpd;
};
int dp_hpd_connect(struct dp_usbpd *dp_usbpd, bool hpd)
{
int rc = 0;
struct dp_hpd_private *hpd_priv;
hpd_priv = container_of(dp_usbpd, struct dp_hpd_private,
dp_usbpd);
dp_usbpd->hpd_high = hpd;
if (!hpd_priv->dp_cb || !hpd_priv->dp_cb->configure
|| !hpd_priv->dp_cb->disconnect) {
pr_err("hpd dp_cb not initialized\n");
return -EINVAL;
}
if (hpd)
hpd_priv->dp_cb->configure(hpd_priv->dev);
else
hpd_priv->dp_cb->disconnect(hpd_priv->dev);
return rc;
}
struct dp_usbpd *dp_hpd_get(struct device *dev, struct dp_usbpd_cb *cb)
{
struct dp_hpd_private *dp_hpd;
if (!cb) {
pr_err("invalid cb data\n");
return ERR_PTR(-EINVAL);
}
dp_hpd = devm_kzalloc(dev, sizeof(*dp_hpd), GFP_KERNEL);
if (!dp_hpd)
return ERR_PTR(-ENOMEM);
dp_hpd->dev = dev;
dp_hpd->dp_cb = cb;
dp_hpd->dp_usbpd.connect = dp_hpd_connect;
return &dp_hpd->dp_usbpd;
}

View File

@ -0,0 +1,80 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
*/
#ifndef _DP_HPD_H_
#define _DP_HPD_H_
//#include <linux/usb/usbpd.h>
#include <linux/types.h>
#include <linux/device.h>
enum plug_orientation {
ORIENTATION_NONE,
ORIENTATION_CC1,
ORIENTATION_CC2,
};
/**
* struct dp_usbpd - DisplayPort status
*
* @orientation: plug orientation configuration
* @low_pow_st: low power state
* @adaptor_dp_en: adaptor functionality enabled
* @multi_func: multi-function preferred
* @usb_config_req: request to switch to usb
* @exit_dp_mode: request exit from displayport mode
* @hpd_high: Hot Plug Detect signal is high.
* @hpd_irq: Change in the status since last message
* @alt_mode_cfg_done: bool to specify alt mode status
* @debug_en: bool to specify debug mode
* @connect: simulate disconnect or connect for debug mode
*/
struct dp_usbpd {
enum plug_orientation orientation;
bool low_pow_st;
bool adaptor_dp_en;
bool multi_func;
bool usb_config_req;
bool exit_dp_mode;
bool hpd_high;
bool hpd_irq;
bool alt_mode_cfg_done;
bool debug_en;
int (*connect)(struct dp_usbpd *dp_usbpd, bool hpd);
};
/**
* struct dp_usbpd_cb - callback functions provided by the client
*
* @configure: called by usbpd module when PD communication has
* been completed and the usb peripheral has been configured on
* dp mode.
* @disconnect: notify the cable disconnect issued by usb.
* @attention: notify any attention message issued by usb.
*/
struct dp_usbpd_cb {
int (*configure)(struct device *dev);
int (*disconnect)(struct device *dev);
int (*attention)(struct device *dev);
};
/**
* dp_hpd_get() - setup hpd module
*
* @dev: device instance of the caller
* @cb: struct containing callback function pointers.
*
* This function allows the client to initialize the usbpd
* module. The module will communicate with the HPD module.
*/
struct dp_usbpd *dp_hpd_get(struct device *dev, struct dp_usbpd_cb *cb);
int dp_hpd_register(struct dp_usbpd *dp_usbpd);
void dp_hpd_unregister(struct dp_usbpd *dp_usbpd);
int dp_hpd_connect(struct dp_usbpd *dp_usbpd, bool hpd);
#endif /* _DP_HPD_H_ */
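A minimal sketch of a client of this interface; the example_* names are hypothetical:

/* Illustrative only: wire up the callbacks and simulate a hotplug. */
static int example_configure(struct device *dev)  { return 0; /* start link */ }
static int example_disconnect(struct device *dev) { return 0; /* tear down */ }

static struct dp_usbpd_cb example_cb = {
	.configure  = example_configure,
	.disconnect = example_disconnect,
};

static int example_probe(struct device *dev)
{
	struct dp_usbpd *usbpd = dp_hpd_get(dev, &example_cb);

	if (IS_ERR(usbpd))
		return PTR_ERR(usbpd);
	/* on cable attach; ends up calling example_configure() */
	return usbpd->connect(usbpd, true);
}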

File diff suppressed because it is too large

View File

@ -0,0 +1,155 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
*/
#ifndef _DP_LINK_H_
#define _DP_LINK_H_
#include "dp_aux.h"
#define DS_PORT_STATUS_CHANGED 0x200
#define DP_TEST_BIT_DEPTH_UNKNOWN 0xFFFFFFFF
#define DP_LINK_CAP_ENHANCED_FRAMING (1 << 0)
struct dp_link_info {
unsigned char revision;
unsigned int rate;
unsigned int num_lanes;
unsigned long capabilities;
};
enum dp_link_voltage_level {
DP_TRAIN_VOLTAGE_SWING_LVL_0 = 0,
DP_TRAIN_VOLTAGE_SWING_LVL_1 = 1,
DP_TRAIN_VOLTAGE_SWING_LVL_2 = 2,
DP_TRAIN_VOLTAGE_SWING_MAX = DP_TRAIN_VOLTAGE_SWING_LVL_2,
};
enum dp_link_preemaphasis_level {
DP_TRAIN_PRE_EMPHASIS_LVL_0 = 0,
DP_TRAIN_PRE_EMPHASIS_LVL_1 = 1,
DP_TRAIN_PRE_EMPHASIS_LVL_2 = 2,
DP_TRAIN_PRE_EMPHASIS_MAX = DP_TRAIN_PRE_EMPHASIS_LVL_2,
};
struct dp_link_test_video {
u32 test_video_pattern;
u32 test_bit_depth;
u32 test_dyn_range;
u32 test_h_total;
u32 test_v_total;
u32 test_h_start;
u32 test_v_start;
u32 test_hsync_pol;
u32 test_hsync_width;
u32 test_vsync_pol;
u32 test_vsync_width;
u32 test_h_width;
u32 test_v_height;
u32 test_rr_d;
u32 test_rr_n;
};
struct dp_link_test_audio {
u32 test_audio_sampling_rate;
u32 test_audio_channel_count;
u32 test_audio_pattern_type;
u32 test_audio_period_ch_1;
u32 test_audio_period_ch_2;
u32 test_audio_period_ch_3;
u32 test_audio_period_ch_4;
u32 test_audio_period_ch_5;
u32 test_audio_period_ch_6;
u32 test_audio_period_ch_7;
u32 test_audio_period_ch_8;
};
struct dp_link_phy_params {
u32 phy_test_pattern_sel;
u8 v_level;
u8 p_level;
};
struct dp_link {
u32 sink_request;
u32 test_response;
bool psm_enabled;
u8 sink_count;
struct dp_link_test_video test_video;
struct dp_link_test_audio test_audio;
struct dp_link_phy_params phy_params;
struct dp_link_info link_params;
};
/**
* dp_link_bit_depth_to_bpp() - convert test bit depth to bpp
* @tbd: test bit depth
*
* Returns the bits per pixel (bpp) to be used corresponding to the
* given bit depth value. This function assumes that bit depth has
* already been validated.
*/
static inline u32 dp_link_bit_depth_to_bpp(u32 tbd)
{
/*
* Few simplistic rules and assumptions made here:
* 1. Bit depth is per color component
* 2. If bit depth is unknown return 0
* 3. Assume 3 color components
*/
switch (tbd) {
case DP_TEST_BIT_DEPTH_6:
return 18;
case DP_TEST_BIT_DEPTH_8:
return 24;
case DP_TEST_BIT_DEPTH_10:
return 30;
case DP_TEST_BIT_DEPTH_UNKNOWN:
default:
return 0;
}
}
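/* For example, dp_link_bit_depth_to_bpp(DP_TEST_BIT_DEPTH_8) returns 24:
* 8 bits per component times 3 color components.
*/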
/**
* dp_link_bit_depth_to_bpc() - convert test bit depth to bpc
* @tbd: test bit depth
*
* Returns the bits per component (bpc) to be used corresponding to the
* bit depth value. This function assumes that bit depth has
* already been validated.
*/
static inline u32 dp_link_bit_depth_to_bpc(u32 tbd)
{
switch (tbd) {
case DP_TEST_BIT_DEPTH_6:
return 6;
case DP_TEST_BIT_DEPTH_8:
return 8;
case DP_TEST_BIT_DEPTH_10:
return 10;
case DP_TEST_BIT_DEPTH_UNKNOWN:
default:
return 0;
}
}
u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp);
int dp_link_process_request(struct dp_link *dp_link);
int dp_link_get_colorimetry_config(struct dp_link *dp_link);
int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status);
bool dp_link_send_test_response(struct dp_link *dp_link);
int dp_link_psm_config(struct dp_link *dp_link,
struct dp_link_info *link_info, bool enable);
bool dp_link_send_edid_checksum(struct dp_link *dp_link, u8 checksum);
/**
* dp_link_get() - get the functionalities of dp test module
*
* @dev: device instance of the caller
* @aux: DP AUX channel used for link transactions
* return: a pointer to dp_link struct
*/
struct dp_link *dp_link_get(struct device *dev, struct drm_dp_aux *aux);
#endif /* _DP_LINK_H_ */

View File

@ -0,0 +1,463 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
*/
#include "dp_panel.h"
#include <drm/drm_connector.h>
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
struct dp_panel_private {
struct device *dev;
struct dp_panel dp_panel;
struct drm_dp_aux *aux;
struct dp_link *link;
struct dp_catalog *catalog;
bool panel_on;
bool aux_cfg_update_done;
};
static int dp_panel_read_dpcd(struct dp_panel *dp_panel)
{
int rc = 0;
size_t len;
ssize_t rlen;
struct dp_panel_private *panel;
struct dp_link_info *link_info;
u8 *dpcd, major = 0, minor = 0, temp;
u32 offset = DP_DPCD_REV;
dpcd = dp_panel->dpcd;
panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
link_info = &dp_panel->link_info;
rlen = drm_dp_dpcd_read(panel->aux, offset,
dpcd, (DP_RECEIVER_CAP_SIZE + 1));
if (rlen < (DP_RECEIVER_CAP_SIZE + 1)) {
DRM_ERROR("dpcd read failed, rlen=%zd\n", rlen);
if (rlen == -ETIMEDOUT)
rc = rlen;
else
rc = -EINVAL;
goto end;
}
temp = dpcd[DP_TRAINING_AUX_RD_INTERVAL];
/* check for EXTENDED_RECEIVER_CAPABILITY_FIELD_PRESENT */
if (temp & BIT(7)) {
DRM_DEBUG_DP("using EXTENDED_RECEIVER_CAPABILITY_FIELD\n");
offset = DPRX_EXTENDED_DPCD_FIELD;
}
rlen = drm_dp_dpcd_read(panel->aux, offset,
dpcd, (DP_RECEIVER_CAP_SIZE + 1));
if (rlen < (DP_RECEIVER_CAP_SIZE + 1)) {
DRM_ERROR("dpcd read failed, rlen=%zd\n", rlen);
if (rlen == -ETIMEDOUT)
rc = rlen;
else
rc = -EINVAL;
goto end;
}
link_info->revision = dpcd[DP_DPCD_REV];
major = (link_info->revision >> 4) & 0x0f;
minor = link_info->revision & 0x0f;
link_info->rate = drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]);
link_info->num_lanes = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
if (link_info->num_lanes > dp_panel->max_dp_lanes)
link_info->num_lanes = dp_panel->max_dp_lanes;
/* Limit support up to HBR2 until HBR3 support is added */
if (link_info->rate >= (drm_dp_bw_code_to_link_rate(DP_LINK_BW_5_4)))
link_info->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_5_4);
DRM_DEBUG_DP("version: %d.%d\n", major, minor);
DRM_DEBUG_DP("link_rate=%d\n", link_info->rate);
DRM_DEBUG_DP("lane_count=%d\n", link_info->num_lanes);
if (drm_dp_enhanced_frame_cap(dpcd))
link_info->capabilities |= DP_LINK_CAP_ENHANCED_FRAMING;
dp_panel->dfp_present = dpcd[DP_DOWNSTREAMPORT_PRESENT];
dp_panel->dfp_present &= DP_DWN_STRM_PORT_PRESENT;
if (dp_panel->dfp_present && (dpcd[DP_DPCD_REV] > 0x10)) {
dp_panel->ds_port_cnt = dpcd[DP_DOWN_STREAM_PORT_COUNT];
dp_panel->ds_port_cnt &= DP_PORT_COUNT_MASK;
len = DP_DOWNSTREAM_PORTS * DP_DOWNSTREAM_CAP_SIZE;
rlen = drm_dp_dpcd_read(panel->aux,
DP_DOWNSTREAM_PORT_0, dp_panel->ds_cap_info, len);
if (rlen < len) {
DRM_ERROR("ds port status failed, rlen=%zd\n", rlen);
rc = -EINVAL;
goto end;
}
}
end:
return rc;
}
static u32 dp_panel_get_supported_bpp(struct dp_panel *dp_panel,
u32 mode_edid_bpp, u32 mode_pclk_khz)
{
struct dp_link_info *link_info;
const u32 max_supported_bpp = 30, min_supported_bpp = 18;
u32 bpp = 0, data_rate_khz = 0;
bpp = min_t(u32, mode_edid_bpp, max_supported_bpp);
link_info = &dp_panel->link_info;
data_rate_khz = link_info->num_lanes * link_info->rate * 8;
while (bpp > min_supported_bpp) {
if (mode_pclk_khz * bpp <= data_rate_khz)
break;
bpp -= 6;
}
return bpp;
}
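/*
* Worked example for the loop above (illustrative numbers): a 4-lane
* HBR2 link gives data_rate_khz = 4 * 540000 * 8 = 17,280,000. A
* 594,000 kHz 4K mode at 30 bpp needs 17,820,000, which does not fit,
* so bpp steps down to 24 (14,256,000), which does.
*/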
static int dp_panel_update_modes(struct drm_connector *connector,
struct edid *edid)
{
int rc = 0;
if (edid) {
rc = drm_connector_update_edid_property(connector, edid);
if (rc) {
DRM_ERROR("failed to update edid property %d\n", rc);
return rc;
}
rc = drm_add_edid_modes(connector, edid);
DRM_DEBUG_DP("%s -", __func__);
return rc;
}
rc = drm_connector_update_edid_property(connector, NULL);
if (rc)
DRM_ERROR("failed to update edid property %d\n", rc);
return rc;
}
int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
struct drm_connector *connector)
{
int rc = 0, bw_code;
int rlen;
u8 count;
struct dp_panel_private *panel;
if (!dp_panel || !connector) {
DRM_ERROR("invalid input\n");
return -EINVAL;
}
panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
rc = dp_panel_read_dpcd(dp_panel);
bw_code = drm_dp_link_rate_to_bw_code(dp_panel->link_info.rate);
if (rc || !is_link_rate_valid(bw_code) ||
!is_lane_count_valid(dp_panel->link_info.num_lanes) ||
(bw_code > dp_panel->max_bw_code)) {
DRM_ERROR("read dpcd failed %d\n", rc);
return rc ? rc : -EINVAL;
}
if (dp_panel->dfp_present) {
rlen = drm_dp_dpcd_read(panel->aux, DP_SINK_COUNT,
&count, 1);
if (rlen == 1) {
count = DP_GET_SINK_COUNT(count);
if (!count) {
DRM_ERROR("no downstream ports connected\n");
panel->link->sink_count = 0;
rc = -ENOTCONN;
goto end;
}
}
}
kfree(dp_panel->edid);
dp_panel->edid = NULL;
dp_panel->edid = drm_get_edid(connector,
&panel->aux->ddc);
if (!dp_panel->edid) {
DRM_ERROR("panel edid read failed\n");
/* fail safe edid */
mutex_lock(&connector->dev->mode_config.mutex);
if (drm_add_modes_noedid(connector, 640, 480))
drm_set_preferred_mode(connector, 640, 480);
mutex_unlock(&connector->dev->mode_config.mutex);
}
if (panel->aux_cfg_update_done) {
DRM_DEBUG_DP("read DPCD with updated AUX config\n");
rc = dp_panel_read_dpcd(dp_panel);
bw_code = drm_dp_link_rate_to_bw_code(dp_panel->link_info.rate);
if (rc || !is_link_rate_valid(bw_code) ||
!is_lane_count_valid(dp_panel->link_info.num_lanes)
|| (bw_code > dp_panel->max_bw_code)) {
DRM_ERROR("read dpcd failed %d\n", rc);
return rc ? rc : -EINVAL;
}
panel->aux_cfg_update_done = false;
}
end:
return rc;
}
u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel,
u32 mode_edid_bpp, u32 mode_pclk_khz)
{
struct dp_panel_private *panel;
u32 bpp = mode_edid_bpp;
if (!dp_panel || !mode_edid_bpp || !mode_pclk_khz) {
DRM_ERROR("invalid input\n");
return 0;
}
panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
if (dp_panel->video_test)
bpp = dp_link_bit_depth_to_bpp(
panel->link->test_video.test_bit_depth);
else
bpp = dp_panel_get_supported_bpp(dp_panel, mode_edid_bpp,
mode_pclk_khz);
return bpp;
}
int dp_panel_get_modes(struct dp_panel *dp_panel,
struct drm_connector *connector, struct dp_display_mode *mode)
{
if (!dp_panel) {
DRM_ERROR("invalid input\n");
return -EINVAL;
}
if (dp_panel->edid)
return dp_panel_update_modes(connector, dp_panel->edid);
return 0;
}
static u8 dp_panel_get_edid_checksum(struct edid *edid)
{
struct edid *last_block;
u8 *raw_edid;
bool is_edid_corrupt;
if (!edid) {
DRM_ERROR("invalid edid input\n");
return 0;
}
raw_edid = (u8 *)edid;
raw_edid += (edid->extensions * EDID_LENGTH);
last_block = (struct edid *)raw_edid;
/* block type extension */
drm_edid_block_valid(raw_edid, 1, false, &is_edid_corrupt);
if (!is_edid_corrupt)
return last_block->checksum;
DRM_ERROR("Invalid block, no checksum\n");
return 0;
}
void dp_panel_handle_sink_request(struct dp_panel *dp_panel)
{
struct dp_panel_private *panel;
if (!dp_panel) {
DRM_ERROR("invalid input\n");
return;
}
panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
if (panel->link->sink_request & DP_TEST_LINK_EDID_READ) {
u8 checksum = dp_panel_get_edid_checksum(dp_panel->edid);
dp_link_send_edid_checksum(panel->link, checksum);
dp_link_send_test_response(panel->link);
}
}
void dp_panel_tpg_config(struct dp_panel *dp_panel, bool enable)
{
struct dp_catalog *catalog;
struct dp_panel_private *panel;
if (!dp_panel) {
DRM_ERROR("invalid input\n");
return;
}
panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
catalog = panel->catalog;
if (!panel->panel_on) {
DRM_DEBUG_DP("DP panel not enabled, handle TPG on next on\n");
return;
}
if (!enable) {
dp_catalog_panel_tpg_disable(catalog);
return;
}
DRM_DEBUG_DP("%s: calling catalog tpg_enable\n", __func__);
dp_catalog_panel_tpg_enable(catalog, &panel->dp_panel.dp_mode.drm_mode);
}
void dp_panel_dump_regs(struct dp_panel *dp_panel)
{
struct dp_catalog *catalog;
struct dp_panel_private *panel;
panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
catalog = panel->catalog;
dp_catalog_dump_regs(catalog);
}
int dp_panel_timing_cfg(struct dp_panel *dp_panel)
{
int rc = 0;
u32 data, total_ver, total_hor;
struct dp_catalog *catalog;
struct dp_panel_private *panel;
struct drm_display_mode *drm_mode;
panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
catalog = panel->catalog;
drm_mode = &panel->dp_panel.dp_mode.drm_mode;
DRM_DEBUG_DP("width=%d hporch= %d %d %d\n",
drm_mode->hdisplay, drm_mode->htotal - drm_mode->hsync_end,
drm_mode->hsync_start - drm_mode->hdisplay,
drm_mode->hsync_end - drm_mode->hsync_start);
DRM_DEBUG_DP("height=%d vporch= %d %d %d\n",
drm_mode->vdisplay, drm_mode->vtotal - drm_mode->vsync_end,
drm_mode->vsync_start - drm_mode->vdisplay,
drm_mode->vsync_end - drm_mode->vsync_start);
total_hor = drm_mode->htotal;
total_ver = drm_mode->vtotal;
data = total_ver;
data <<= 16;
data |= total_hor;
catalog->total = data;
data = (drm_mode->vtotal - drm_mode->vsync_start);
data <<= 16;
data |= (drm_mode->htotal - drm_mode->hsync_start);
catalog->sync_start = data;
data = drm_mode->vsync_end - drm_mode->vsync_start;
data <<= 16;
data |= (panel->dp_panel.dp_mode.v_active_low << 31);
data |= drm_mode->hsync_end - drm_mode->hsync_start;
data |= (panel->dp_panel.dp_mode.h_active_low << 15);
catalog->width_blanking = data;
data = drm_mode->vdisplay;
data <<= 16;
data |= drm_mode->hdisplay;
catalog->dp_active = data;
dp_catalog_panel_timing_cfg(catalog);
panel->panel_on = true;
return rc;
}
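/*
* Worked example for the packing above: a CEA 1920x1080@60 mode has
* htotal 2200, vtotal 1125, hsync_start 2008 and vsync_start 1084, so
* catalog->total      = (1125 << 16) | 2200 and
* catalog->sync_start = ((1125 - 1084) << 16) | (2200 - 2008)
*                     = (41 << 16) | 192.
*/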
int dp_panel_init_panel_info(struct dp_panel *dp_panel)
{
int rc = 0;
struct drm_display_mode *drm_mode;
drm_mode = &dp_panel->dp_mode.drm_mode;
/*
* print resolution info as this is a result
* of a user-initiated cable connection
*/
DRM_DEBUG_DP("SET NEW RESOLUTION:\n");
DRM_DEBUG_DP("%dx%d@%dfps\n", drm_mode->hdisplay,
drm_mode->vdisplay, drm_mode_vrefresh(drm_mode));
DRM_DEBUG_DP("h_porches(back|front|width) = (%d|%d|%d)\n",
drm_mode->htotal - drm_mode->hsync_end,
drm_mode->hsync_start - drm_mode->hdisplay,
drm_mode->hsync_end - drm_mode->hsync_start);
DRM_DEBUG_DP("v_porches(back|front|width) = (%d|%d|%d)\n",
drm_mode->vtotal - drm_mode->vsync_end,
drm_mode->vsync_start - drm_mode->vdisplay,
drm_mode->vsync_end - drm_mode->vsync_start);
DRM_DEBUG_DP("pixel clock (KHz)=(%d)\n", drm_mode->clock);
DRM_DEBUG_DP("bpp = %d\n", dp_panel->dp_mode.bpp);
dp_panel->dp_mode.bpp = max_t(u32, 18,
min_t(u32, dp_panel->dp_mode.bpp, 30));
DRM_DEBUG_DP("updated bpp = %d\n", dp_panel->dp_mode.bpp);
return rc;
}
struct dp_panel *dp_panel_get(struct dp_panel_in *in)
{
struct dp_panel_private *panel;
struct dp_panel *dp_panel;
if (!in->dev || !in->catalog || !in->aux || !in->link) {
DRM_ERROR("invalid input\n");
return ERR_PTR(-EINVAL);
}
panel = devm_kzalloc(in->dev, sizeof(*panel), GFP_KERNEL);
if (!panel)
return ERR_PTR(-ENOMEM);
panel->dev = in->dev;
panel->aux = in->aux;
panel->catalog = in->catalog;
panel->link = in->link;
dp_panel = &panel->dp_panel;
dp_panel->max_bw_code = DP_LINK_BW_8_1;
panel->aux_cfg_update_done = false;
return dp_panel;
}
void dp_panel_put(struct dp_panel *dp_panel)
{
if (!dp_panel)
return;
kfree(dp_panel->edid);
}

View File

@ -0,0 +1,100 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
*/
#ifndef _DP_PANEL_H_
#define _DP_PANEL_H_
#include <drm/msm_drm.h>
#include "dp_aux.h"
#include "dp_link.h"
#include "dp_hpd.h"
struct edid;
#define DPRX_EXTENDED_DPCD_FIELD 0x2200
#define DP_DOWNSTREAM_PORTS 4
#define DP_DOWNSTREAM_CAP_SIZE 4
struct dp_display_mode {
struct drm_display_mode drm_mode;
u32 capabilities;
u32 bpp;
u32 h_active_low;
u32 v_active_low;
};
struct dp_panel_in {
struct device *dev;
struct drm_dp_aux *aux;
struct dp_link *link;
struct dp_catalog *catalog;
};
struct dp_panel {
/* dpcd raw data */
u8 dpcd[DP_RECEIVER_CAP_SIZE + 1];
u8 ds_cap_info[DP_DOWNSTREAM_PORTS * DP_DOWNSTREAM_CAP_SIZE];
u32 ds_port_cnt;
u32 dfp_present;
struct dp_link_info link_info;
struct drm_dp_desc desc;
struct edid *edid;
struct drm_connector *connector;
struct dp_display_mode dp_mode;
bool video_test;
u32 vic;
u32 max_pclk_khz;
u32 max_dp_lanes;
u32 max_bw_code;
};
int dp_panel_init_panel_info(struct dp_panel *dp_panel);
int dp_panel_deinit(struct dp_panel *dp_panel);
int dp_panel_timing_cfg(struct dp_panel *dp_panel);
void dp_panel_dump_regs(struct dp_panel *dp_panel);
int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
struct drm_connector *connector);
u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel, u32 mode_max_bpp,
u32 mode_pclk_khz);
int dp_panel_get_modes(struct dp_panel *dp_panel,
struct drm_connector *connector, struct dp_display_mode *mode);
void dp_panel_handle_sink_request(struct dp_panel *dp_panel);
void dp_panel_tpg_config(struct dp_panel *dp_panel, bool enable);
/**
* is_link_rate_valid() - validates the link rate
* @bw_code: bandwidth code of the link rate requested by the sink
*
* Returns true if the requested link rate is supported.
*/
static inline bool is_link_rate_valid(u32 bw_code)
{
return (bw_code == DP_LINK_BW_1_62 ||
bw_code == DP_LINK_BW_2_7 ||
bw_code == DP_LINK_BW_5_4 ||
bw_code == DP_LINK_BW_8_1);
}
/**
* is_lane_count_valid() - validates the lane count
* @lane_count: lane count requested by the sink
*
* Returns true if the requested lane count is supported.
*/
static inline bool is_lane_count_valid(u32 lane_count)
{
return (lane_count == 1 ||
lane_count == 2 ||
lane_count == 4);
}
struct dp_panel *dp_panel_get(struct dp_panel_in *in);
void dp_panel_put(struct dp_panel *dp_panel);
#endif /* _DP_PANEL_H_ */

View File

@ -0,0 +1,293 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
*/
#include <linux/of_gpio.h>
#include <linux/phy/phy.h>
#include <drm/drm_print.h>
#include "dp_parser.h"
#include "dp_reg.h"
static const struct dp_regulator_cfg sdm845_dp_reg_cfg = {
.num = 2,
.regs = {
{"vdda-1p2", 21800, 4 }, /* 1.2 V */
{"vdda-0p9", 36000, 32 }, /* 0.9 V */
},
};
static int msm_dss_ioremap(struct platform_device *pdev,
struct dss_io_data *io_data)
{
struct resource *res = NULL;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
DRM_ERROR("%pS->%s: msm_dss_get_res failed\n",
__builtin_return_address(0), __func__);
return -ENODEV;
}
io_data->len = (u32)resource_size(res);
io_data->base = ioremap(res->start, io_data->len);
if (!io_data->base) {
DRM_ERROR("%pS->%s: ioremap failed\n",
__builtin_return_address(0), __func__);
return -EIO;
}
return 0;
}
static void msm_dss_iounmap(struct dss_io_data *io_data)
{
if (io_data->base) {
iounmap(io_data->base);
io_data->base = NULL;
}
io_data->len = 0;
}
static void dp_parser_unmap_io_resources(struct dp_parser *parser)
{
struct dp_io *io = &parser->io;
msm_dss_iounmap(&io->dp_controller);
}
static int dp_parser_ctrl_res(struct dp_parser *parser)
{
int rc = 0;
struct platform_device *pdev = parser->pdev;
struct dp_io *io = &parser->io;
rc = msm_dss_ioremap(pdev, &io->dp_controller);
if (rc) {
DRM_ERROR("unable to remap dp io resources, rc=%d\n", rc);
goto err;
}
io->phy = devm_phy_get(&pdev->dev, "dp");
if (IS_ERR(io->phy)) {
rc = PTR_ERR(io->phy);
goto err;
}
return 0;
err:
dp_parser_unmap_io_resources(parser);
return rc;
}
static int dp_parser_misc(struct dp_parser *parser)
{
struct device_node *of_node = parser->pdev->dev.of_node;
int len = 0;
const char *data_lane_property = "data-lanes";
len = of_property_count_elems_of_size(of_node,
data_lane_property, sizeof(u32));
if (len < 0) {
DRM_WARN("Invalid property %s, default max DP lanes = %d\n",
data_lane_property, DP_MAX_NUM_DP_LANES);
len = DP_MAX_NUM_DP_LANES;
}
parser->max_dp_lanes = len;
return 0;
}
static inline bool dp_parser_check_prefix(const char *clk_prefix,
const char *clk_name)
{
return !strncmp(clk_prefix, clk_name, strlen(clk_prefix));
}
static int dp_parser_init_clk_data(struct dp_parser *parser)
{
int num_clk, i, rc;
int core_clk_count = 0, ctrl_clk_count = 0, stream_clk_count = 0;
const char *clk_name;
struct device *dev = &parser->pdev->dev;
struct dss_module_power *core_power = &parser->mp[DP_CORE_PM];
struct dss_module_power *ctrl_power = &parser->mp[DP_CTRL_PM];
struct dss_module_power *stream_power = &parser->mp[DP_STREAM_PM];
num_clk = of_property_count_strings(dev->of_node, "clock-names");
if (num_clk <= 0) {
DRM_ERROR("no clocks are defined\n");
return -EINVAL;
}
for (i = 0; i < num_clk; i++) {
rc = of_property_read_string_index(dev->of_node,
"clock-names", i, &clk_name);
if (rc < 0)
return rc;
if (dp_parser_check_prefix("core", clk_name))
core_clk_count++;
if (dp_parser_check_prefix("ctrl", clk_name))
ctrl_clk_count++;
if (dp_parser_check_prefix("stream", clk_name))
stream_clk_count++;
}
/* Initialize the CORE power module */
if (core_clk_count == 0) {
DRM_ERROR("no core clocks are defined\n");
return -EINVAL;
}
core_power->num_clk = core_clk_count;
core_power->clk_config = devm_kzalloc(dev,
sizeof(struct dss_clk) * core_power->num_clk,
GFP_KERNEL);
if (!core_power->clk_config)
return -EINVAL;
/* Initialize the CTRL power module */
if (ctrl_clk_count == 0) {
DRM_ERROR("no ctrl clocks are defined\n");
return -EINVAL;
}
ctrl_power->num_clk = ctrl_clk_count;
ctrl_power->clk_config = devm_kzalloc(dev,
sizeof(struct dss_clk) * ctrl_power->num_clk,
GFP_KERNEL);
if (!ctrl_power->clk_config) {
ctrl_power->num_clk = 0;
return -EINVAL;
}
/* Initialize the STREAM power module */
if (stream_clk_count == 0) {
DRM_ERROR("no stream (pixel) clocks are defined\n");
return -EINVAL;
}
stream_power->num_clk = stream_clk_count;
stream_power->clk_config = devm_kzalloc(dev,
sizeof(struct dss_clk) * stream_power->num_clk,
GFP_KERNEL);
if (!stream_power->clk_config) {
stream_power->num_clk = 0;
return -EINVAL;
}
return 0;
}
static int dp_parser_clock(struct dp_parser *parser)
{
int rc = 0, i = 0;
int num_clk = 0;
int core_clk_index = 0, ctrl_clk_index = 0, stream_clk_index = 0;
int core_clk_count = 0, ctrl_clk_count = 0, stream_clk_count = 0;
const char *clk_name;
struct device *dev = &parser->pdev->dev;
struct dss_module_power *core_power = &parser->mp[DP_CORE_PM];
struct dss_module_power *ctrl_power = &parser->mp[DP_CTRL_PM];
struct dss_module_power *stream_power = &parser->mp[DP_STREAM_PM];
rc = dp_parser_init_clk_data(parser);
if (rc) {
DRM_ERROR("failed to initialize power data %d\n", rc);
return -EINVAL;
}
core_clk_count = core_power->num_clk;
ctrl_clk_count = ctrl_power->num_clk;
stream_clk_count = stream_power->num_clk;
num_clk = core_clk_count + ctrl_clk_count + stream_clk_count;
for (i = 0; i < num_clk; i++) {
rc = of_property_read_string_index(dev->of_node, "clock-names",
i, &clk_name);
if (rc) {
DRM_ERROR("error reading clock-names %d\n", rc);
return rc;
}
if (dp_parser_check_prefix("core", clk_name) &&
core_clk_index < core_clk_count) {
struct dss_clk *clk =
&core_power->clk_config[core_clk_index];
strlcpy(clk->clk_name, clk_name, sizeof(clk->clk_name));
clk->type = DSS_CLK_AHB;
core_clk_index++;
} else if (dp_parser_check_prefix("stream", clk_name) &&
stream_clk_index < stream_clk_count) {
struct dss_clk *clk =
&stream_power->clk_config[stream_clk_index];
strlcpy(clk->clk_name, clk_name, sizeof(clk->clk_name));
clk->type = DSS_CLK_PCLK;
stream_clk_index++;
} else if (dp_parser_check_prefix("ctrl", clk_name) &&
ctrl_clk_index < ctrl_clk_count) {
struct dss_clk *clk =
&ctrl_power->clk_config[ctrl_clk_index];
strlcpy(clk->clk_name, clk_name, sizeof(clk->clk_name));
ctrl_clk_index++;
if (dp_parser_check_prefix("ctrl_link", clk_name) ||
dp_parser_check_prefix("stream_pixel", clk_name))
clk->type = DSS_CLK_PCLK;
else
clk->type = DSS_CLK_AHB;
}
}
DRM_DEBUG_DP("clock parsing successful\n");
return 0;
}
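/*
* Illustrative input: with clock-names = "core_iface", "core_aux",
* "ctrl_link", "ctrl_link_iface", "stream_pixel" the buckets come out
* as core = 2, ctrl = 2, stream = 1; the "ctrl_link*" and
* "stream_pixel" entries are typed DSS_CLK_PCLK (rate-settable), the
* rest DSS_CLK_AHB.
*/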
static int dp_parser_parse(struct dp_parser *parser)
{
int rc = 0;
if (!parser) {
DRM_ERROR("invalid input\n");
return -EINVAL;
}
rc = dp_parser_ctrl_res(parser);
if (rc)
return rc;
rc = dp_parser_misc(parser);
if (rc)
return rc;
rc = dp_parser_clock(parser);
if (rc)
return rc;
/* Map the corresponding regulator information according to
* version. Currently, since we only have one supported platform,
* we map the regulator config directly.
*/
parser->regulator_cfg = &sdm845_dp_reg_cfg;
return 0;
}
struct dp_parser *dp_parser_get(struct platform_device *pdev)
{
struct dp_parser *parser;
parser = devm_kzalloc(&pdev->dev, sizeof(*parser), GFP_KERNEL);
if (!parser)
return ERR_PTR(-ENOMEM);
parser->parse = dp_parser_parse;
parser->pdev = pdev;
return parser;
}

View File

@ -0,0 +1,136 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
*/
#ifndef _DP_PARSER_H_
#define _DP_PARSER_H_
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/phy/phy-dp.h>
#include "dpu_io_util.h"
#include "msm_drv.h"
#define DP_LABEL "MDSS DP DISPLAY"
#define DP_MAX_PIXEL_CLK_KHZ 675000
#define DP_MAX_NUM_DP_LANES 4
enum dp_pm_type {
DP_CORE_PM,
DP_CTRL_PM,
DP_STREAM_PM,
DP_PHY_PM,
DP_MAX_PM
};
struct dss_io_data {
u32 len;
void __iomem *base;
};
static inline const char *dp_parser_pm_name(enum dp_pm_type module)
{
switch (module) {
case DP_CORE_PM: return "DP_CORE_PM";
case DP_CTRL_PM: return "DP_CTRL_PM";
case DP_STREAM_PM: return "DP_STREAM_PM";
case DP_PHY_PM: return "DP_PHY_PM";
default: return "???";
}
}
/**
* struct dp_display_data - display related device tree data.
*
* @ctrl_node: reference to controller device
* @phy_node: reference to phy device
* @is_active: is the controller currently active
* @name: name of the display
* @display_type: type of the display
*/
struct dp_display_data {
struct device_node *ctrl_node;
struct device_node *phy_node;
bool is_active;
const char *name;
const char *display_type;
};
/**
* struct dp_io - controller's IO related data
*
* @dp_controller: DisplayPort controller mapped memory address
* @phy: handle to the DP PHY
* @phy_opts: configuration options passed to the PHY
*/
struct dp_io {
struct dss_io_data dp_controller;
struct phy *phy;
union phy_configure_opts phy_opts;
};
/**
* struct dp_pinctrl - DP's pin control
*
* @pin: pin-controller's instance
* @state_active: active state pin control
* @state_hpd_active: hpd active state pin control
* @state_suspend: suspend state pin control
*/
struct dp_pinctrl {
struct pinctrl *pin;
struct pinctrl_state *state_active;
struct pinctrl_state *state_hpd_active;
struct pinctrl_state *state_suspend;
};
#define DP_DEV_REGULATOR_MAX 4
/* Regulators for DP devices */
struct dp_reg_entry {
char name[32];
int enable_load;
int disable_load;
};
struct dp_regulator_cfg {
int num;
struct dp_reg_entry regs[DP_DEV_REGULATOR_MAX];
};
/**
* struct dp_parser - DP parser's data exposed to clients
*
* @pdev: platform data of the client
* @mp: gpio, regulator and clock related data
* @pinctrl: pin-control related data
* @io: controller and PHY IO related data
* @disp_data: controller's display related data
* @regulator_cfg: regulator mapping for the supported platform
* @max_dp_lanes: maximum number of DP lanes supported
* @parse: function to be called by client to parse device tree.
*/
struct dp_parser {
struct platform_device *pdev;
struct dss_module_power mp[DP_MAX_PM];
struct dp_pinctrl pinctrl;
struct dp_io io;
struct dp_display_data disp_data;
const struct dp_regulator_cfg *regulator_cfg;
u32 max_dp_lanes;
int (*parse)(struct dp_parser *parser);
};
/**
* dp_parser_get() - get the DP's device tree parser module
*
* @pdev: platform data of the client
* return: pointer to dp_parser structure.
*
* This function gives the client the ability to parse the
* device tree and populate its data structures. Data related
* to clocks, regulators, pin-control and more can be parsed
* using this module.
*/
struct dp_parser *dp_parser_get(struct platform_device *pdev);
#endif
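A minimal usage sketch (hypothetical caller, error handling abbreviated; pdev is assumed to be the client's platform device): the client allocates the parser against its platform device and then invokes the parse callback, after which the mp[], io and regulator_cfg fields are populated.

struct dp_parser *parser;
int rc;

parser = dp_parser_get(pdev);        /* devm-allocated against pdev */
if (IS_ERR(parser))
        return PTR_ERR(parser);

rc = parser->parse(parser);          /* walks pdev's DT node */
if (rc)
        return rc;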


@ -0,0 +1,372 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/regulator/consumer.h>
#include "dp_power.h"
#include "msm_drv.h"
struct dp_power_private {
struct dp_parser *parser;
struct platform_device *pdev;
struct clk *link_clk_src;
struct clk *pixel_provider;
struct clk *link_provider;
struct regulator_bulk_data supplies[DP_DEV_REGULATOR_MAX];
struct dp_power dp_power;
};
static void dp_power_regulator_disable(struct dp_power_private *power)
{
struct regulator_bulk_data *s = power->supplies;
const struct dp_reg_entry *regs = power->parser->regulator_cfg->regs;
int num = power->parser->regulator_cfg->num;
int i;
DBG("");
for (i = num - 1; i >= 0; i--)
if (regs[i].disable_load >= 0)
regulator_set_load(s[i].consumer,
regs[i].disable_load);
regulator_bulk_disable(num, s);
}
static int dp_power_regulator_enable(struct dp_power_private *power)
{
struct regulator_bulk_data *s = power->supplies;
const struct dp_reg_entry *regs = power->parser->regulator_cfg->regs;
int num = power->parser->regulator_cfg->num;
int ret, i;
DBG("");
for (i = 0; i < num; i++) {
if (regs[i].enable_load >= 0) {
ret = regulator_set_load(s[i].consumer,
regs[i].enable_load);
if (ret < 0) {
pr_err("regulator %d set op mode failed, %d\n",
i, ret);
goto fail;
}
}
}
ret = regulator_bulk_enable(num, s);
if (ret < 0) {
pr_err("regulator enable failed, %d\n", ret);
goto fail;
}
return 0;
fail:
for (i--; i >= 0; i--)
regulator_set_load(s[i].consumer, regs[i].disable_load);
return ret;
}
static int dp_power_regulator_init(struct dp_power_private *power)
{
struct regulator_bulk_data *s = power->supplies;
const struct dp_reg_entry *regs = power->parser->regulator_cfg->regs;
struct platform_device *pdev = power->pdev;
int num = power->parser->regulator_cfg->num;
int i, ret;
for (i = 0; i < num; i++)
s[i].supply = regs[i].name;
ret = devm_regulator_bulk_get(&pdev->dev, num, s);
if (ret < 0) {
pr_err("%s: failed to init regulator, ret=%d\n",
__func__, ret);
return ret;
}
return 0;
}
static int dp_power_clk_init(struct dp_power_private *power)
{
int rc = 0;
struct dss_module_power *core, *ctrl, *stream;
struct device *dev = &power->pdev->dev;
core = &power->parser->mp[DP_CORE_PM];
ctrl = &power->parser->mp[DP_CTRL_PM];
stream = &power->parser->mp[DP_STREAM_PM];
rc = msm_dss_get_clk(dev, core->clk_config, core->num_clk);
if (rc) {
DRM_ERROR("failed to get %s clk. err=%d\n",
dp_parser_pm_name(DP_CORE_PM), rc);
return rc;
}
rc = msm_dss_get_clk(dev, ctrl->clk_config, ctrl->num_clk);
if (rc) {
DRM_ERROR("failed to get %s clk. err=%d\n",
dp_parser_pm_name(DP_CTRL_PM), rc);
msm_dss_put_clk(core->clk_config, core->num_clk);
return -ENODEV;
}
rc = msm_dss_get_clk(dev, stream->clk_config, stream->num_clk);
if (rc) {
DRM_ERROR("failed to get %s clk. err=%d\n",
dp_parser_pm_name(DP_STREAM_PM), rc);
msm_dss_put_clk(ctrl->clk_config, ctrl->num_clk);
msm_dss_put_clk(core->clk_config, core->num_clk);
return -ENODEV;
}
return 0;
}
static int dp_power_clk_deinit(struct dp_power_private *power)
{
struct dss_module_power *core, *ctrl, *stream;
core = &power->parser->mp[DP_CORE_PM];
ctrl = &power->parser->mp[DP_CTRL_PM];
stream = &power->parser->mp[DP_STREAM_PM];
if (!core || !ctrl || !stream) {
DRM_ERROR("invalid power_data\n");
return -EINVAL;
}
msm_dss_put_clk(ctrl->clk_config, ctrl->num_clk);
msm_dss_put_clk(core->clk_config, core->num_clk);
msm_dss_put_clk(stream->clk_config, stream->num_clk);
return 0;
}
static int dp_power_clk_set_rate(struct dp_power_private *power,
enum dp_pm_type module, bool enable)
{
int rc = 0;
struct dss_module_power *mp = &power->parser->mp[module];
if (enable) {
rc = msm_dss_clk_set_rate(mp->clk_config, mp->num_clk);
if (rc) {
DRM_ERROR("failed to set clks rate.\n");
return rc;
}
}
rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, enable);
if (rc) {
DRM_ERROR("failed to %d clks, err: %d\n", enable, rc);
return rc;
}
return 0;
}
int dp_power_clk_status(struct dp_power *dp_power, enum dp_pm_type pm_type)
{
if (pm_type == DP_CORE_PM)
return dp_power->core_clks_on;
if (pm_type == DP_CTRL_PM)
return dp_power->link_clks_on;
if (pm_type == DP_STREAM_PM)
return dp_power->stream_clks_on;
return 0;
}
int dp_power_clk_enable(struct dp_power *dp_power,
enum dp_pm_type pm_type, bool enable)
{
int rc = 0;
struct dp_power_private *power;
power = container_of(dp_power, struct dp_power_private, dp_power);
if (pm_type != DP_CORE_PM && pm_type != DP_CTRL_PM &&
pm_type != DP_STREAM_PM) {
DRM_ERROR("unsupported power module: %s\n",
dp_parser_pm_name(pm_type));
return -EINVAL;
}
if (enable) {
if (pm_type == DP_CORE_PM && dp_power->core_clks_on) {
DRM_DEBUG_DP("core clks already enabled\n");
return 0;
}
if (pm_type == DP_CTRL_PM && dp_power->link_clks_on) {
DRM_DEBUG_DP("links clks already enabled\n");
return 0;
}
if (pm_type == DP_STREAM_PM && dp_power->stream_clks_on) {
DRM_DEBUG_DP("pixel clks already enabled\n");
return 0;
}
if ((pm_type == DP_CTRL_PM) && (!dp_power->core_clks_on)) {
DRM_DEBUG_DP("Enable core clks before link clks\n");
rc = dp_power_clk_set_rate(power, DP_CORE_PM, enable);
if (rc) {
DRM_ERROR("fail to enable clks: %s. err=%d\n",
dp_parser_pm_name(DP_CORE_PM), rc);
return rc;
}
dp_power->core_clks_on = true;
}
}
rc = dp_power_clk_set_rate(power, pm_type, enable);
if (rc) {
DRM_ERROR("failed to '%s' clks for: %s. err=%d\n",
enable ? "enable" : "disable",
dp_parser_pm_name(pm_type), rc);
return rc;
}
if (pm_type == DP_CORE_PM)
dp_power->core_clks_on = enable;
else if (pm_type == DP_STREAM_PM)
dp_power->stream_clks_on = enable;
else
dp_power->link_clks_on = enable;
DRM_DEBUG_DP("%s clocks for %s\n",
enable ? "enable" : "disable",
dp_parser_pm_name(pm_type));
DRM_DEBUG_DP("strem_clks:%s link_clks:%s core_clks:%s\n",
dp_power->stream_clks_on ? "on" : "off",
dp_power->link_clks_on ? "on" : "off",
dp_power->core_clks_on ? "on" : "off");
return 0;
}
int dp_power_client_init(struct dp_power *dp_power)
{
int rc = 0;
struct dp_power_private *power;
if (!dp_power) {
DRM_ERROR("invalid power data\n");
return -EINVAL;
}
power = container_of(dp_power, struct dp_power_private, dp_power);
pm_runtime_enable(&power->pdev->dev);
rc = dp_power_regulator_init(power);
if (rc) {
DRM_ERROR("failed to init regulators %d\n", rc);
goto error;
}
rc = dp_power_clk_init(power);
if (rc) {
DRM_ERROR("failed to init clocks %d\n", rc);
goto error;
}
return 0;
error:
pm_runtime_disable(&power->pdev->dev);
return rc;
}
void dp_power_client_deinit(struct dp_power *dp_power)
{
struct dp_power_private *power;
if (!dp_power) {
DRM_ERROR("invalid power data\n");
return;
}
power = container_of(dp_power, struct dp_power_private, dp_power);
dp_power_clk_deinit(power);
pm_runtime_disable(&power->pdev->dev);
}
int dp_power_init(struct dp_power *dp_power, bool flip)
{
int rc = 0;
struct dp_power_private *power = NULL;
if (!dp_power) {
DRM_ERROR("invalid power data\n");
return -EINVAL;
}
power = container_of(dp_power, struct dp_power_private, dp_power);
pm_runtime_get_sync(&power->pdev->dev);
rc = dp_power_regulator_enable(power);
if (rc) {
DRM_ERROR("failed to enable regulators, %d\n", rc);
goto exit;
}
rc = dp_power_clk_enable(dp_power, DP_CORE_PM, true);
if (rc) {
DRM_ERROR("failed to enable DP core clocks, %d\n", rc);
goto err_clk;
}
return 0;
err_clk:
dp_power_regulator_disable(power);
exit:
pm_runtime_put_sync(&power->pdev->dev);
return rc;
}
int dp_power_deinit(struct dp_power *dp_power)
{
struct dp_power_private *power;
power = container_of(dp_power, struct dp_power_private, dp_power);
dp_power_clk_enable(dp_power, DP_CORE_PM, false);
dp_power_regulator_disable(power);
pm_runtime_put_sync(&power->pdev->dev);
return 0;
}
struct dp_power *dp_power_get(struct dp_parser *parser)
{
struct dp_power_private *power;
struct dp_power *dp_power;
if (!parser) {
DRM_ERROR("invalid input\n");
return ERR_PTR(-EINVAL);
}
power = devm_kzalloc(&parser->pdev->dev, sizeof(*power), GFP_KERNEL);
if (!power)
return ERR_PTR(-ENOMEM);
power->parser = parser;
power->pdev = parser->pdev;
dp_power = &power->dp_power;
return dp_power;
}


@ -0,0 +1,107 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
*/
#ifndef _DP_POWER_H_
#define _DP_POWER_H_
#include "dp_parser.h"
/**
* struct dp_power - DisplayPort's power related data
*
* @core_clks_on: whether the core clocks are currently enabled
* @link_clks_on: whether the link (ctrl) clocks are currently enabled
* @stream_clks_on: whether the stream (pixel) clocks are currently enabled
*/
struct dp_power {
bool core_clks_on;
bool link_clks_on;
bool stream_clks_on;
};
/**
* dp_power_init() - enable power supplies for display controller
*
* @power: instance of power module
* @flip: bool for flipping gpio direction
* return: 0 if success or error if failure.
*
* This API will turn on the regulators and configure the
* aux/hpd GPIOs.
*/
int dp_power_init(struct dp_power *power, bool flip);
/**
* dp_power_deinit() - turn off regulators and gpios.
*
* @power: instance of power module
* return: 0 for success
*
* This API turns off power and regulators.
*/
int dp_power_deinit(struct dp_power *power);
/**
* dp_power_clk_status() - display controller clocks status
*
* @power: instance of power module
* @pm_type: type of pm, core/ctrl/stream
* return: status of power clocks
*
* This API returns the status of the DP clocks
*/
int dp_power_clk_status(struct dp_power *dp_power, enum dp_pm_type pm_type);
/**
* dp_power_clk_enable() - enable display controller clocks
*
* @power: instance of power module
* @pm_type: type of pm, core/ctrl/stream
* @enable: enables or disables
* return: 0 for success, error for failure.
*
* This API will call set_rate and enable for the DP clocks
*/
int dp_power_clk_enable(struct dp_power *power, enum dp_pm_type pm_type,
bool enable);
/**
* dp_power_client_init() - initialize clock and regulator modules
*
* @power: instance of power module
* return: 0 for success, error for failure.
*
* This API will configure the DisplayPort's clocks and regulator
* modules.
*/
int dp_power_client_init(struct dp_power *power);
/**
* dp_power_client_deinit() - de-initialize clock and regulator modules
*
* @power: instance of power module
* return: none.
*
* This API will de-initialize the DisplayPort's clocks and regulator
* modules.
*/
void dp_power_client_deinit(struct dp_power *power);
/**
* dp_power_get() - configure and get the DisplayPort power module data
*
* @parser: instance of parser module
* return: pointer to allocated power module data
*
* This API will configure the DisplayPort's power module and provide
* methods to be called by the client to configure the power related
* modules.
*/
struct dp_power *dp_power_get(struct dp_parser *parser);
#endif /* _DP_POWER_H_ */
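A hedged bring-up sketch, assuming a successfully parsed dp_parser and propagating errors verbatim; the ordering (client init, then init, then link clocks) follows the kernel-doc above:

struct dp_power *power;
int rc;

power = dp_power_get(parser);           /* parser from dp_parser_get() */
if (IS_ERR(power))
        return PTR_ERR(power);

rc = dp_power_client_init(power);       /* regulators + clock handles */
if (rc)
        return rc;

rc = dp_power_init(power, false);       /* regulators on, core clocks on */
if (rc)
        return rc;

rc = dp_power_clk_enable(power, DP_CTRL_PM, true);  /* link clocks */
/* ... and on teardown: */
dp_power_deinit(power);
dp_power_client_deinit(power);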


@ -0,0 +1,306 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
*/
#ifndef _DP_REG_H_
#define _DP_REG_H_
/* DP_TX Registers */
#define REG_DP_HW_VERSION (0x00000000)
#define REG_DP_SW_RESET (0x00000010)
#define DP_SW_RESET (0x00000001)
#define REG_DP_PHY_CTRL (0x00000014)
#define DP_PHY_CTRL_SW_RESET_PLL (0x00000001)
#define DP_PHY_CTRL_SW_RESET (0x00000004)
#define REG_DP_CLK_CTRL (0x00000018)
#define REG_DP_CLK_ACTIVE (0x0000001C)
#define REG_DP_INTR_STATUS (0x00000020)
#define REG_DP_INTR_STATUS2 (0x00000024)
#define REG_DP_INTR_STATUS3 (0x00000028)
#define REG_DP_DP_HPD_CTRL (0x00000000)
#define DP_DP_HPD_CTRL_HPD_EN (0x00000001)
#define REG_DP_DP_HPD_INT_STATUS (0x00000004)
#define REG_DP_DP_HPD_INT_ACK (0x00000008)
#define DP_DP_HPD_PLUG_INT_ACK (0x00000001)
#define DP_DP_IRQ_HPD_INT_ACK (0x00000002)
#define DP_DP_HPD_REPLUG_INT_ACK (0x00000004)
#define DP_DP_HPD_UNPLUG_INT_ACK (0x00000008)
#define REG_DP_DP_HPD_INT_MASK (0x0000000C)
#define DP_DP_HPD_PLUG_INT_MASK (0x00000001)
#define DP_DP_IRQ_HPD_INT_MASK (0x00000002)
#define DP_DP_HPD_REPLUG_INT_MASK (0x00000004)
#define DP_DP_HPD_UNPLUG_INT_MASK (0x00000008)
#define DP_DP_HPD_INT_MASK (DP_DP_HPD_PLUG_INT_MASK | \
DP_DP_IRQ_HPD_INT_MASK | \
DP_DP_HPD_REPLUG_INT_MASK | \
DP_DP_HPD_UNPLUG_INT_MASK)
#define DP_DP_HPD_STATE_STATUS_CONNECTED (0x40000000)
#define DP_DP_HPD_STATE_STATUS_PENDING (0x20000000)
#define DP_DP_HPD_STATE_STATUS_DISCONNECTED (0x00000000)
#define DP_DP_HPD_STATE_STATUS_MASK (0xE0000000)
#define REG_DP_DP_HPD_REFTIMER (0x00000018)
#define DP_DP_HPD_REFTIMER_ENABLE (1 << 16)
#define REG_DP_DP_HPD_EVENT_TIME_0 (0x0000001C)
#define REG_DP_DP_HPD_EVENT_TIME_1 (0x00000020)
#define DP_DP_HPD_EVENT_TIME_0_VAL (0x3E800FA)
#define DP_DP_HPD_EVENT_TIME_1_VAL (0x1F407D0)
#define REG_DP_AUX_CTRL (0x00000030)
#define DP_AUX_CTRL_ENABLE (0x00000001)
#define DP_AUX_CTRL_RESET (0x00000002)
#define REG_DP_AUX_DATA (0x00000034)
#define DP_AUX_DATA_READ (0x00000001)
#define DP_AUX_DATA_WRITE (0x00000000)
#define DP_AUX_DATA_OFFSET (0x00000008)
#define DP_AUX_DATA_INDEX_OFFSET (0x00000010)
#define DP_AUX_DATA_MASK (0x0000ff00)
#define DP_AUX_DATA_INDEX_WRITE (0x80000000)
#define REG_DP_AUX_TRANS_CTRL (0x00000038)
#define DP_AUX_TRANS_CTRL_I2C (0x00000100)
#define DP_AUX_TRANS_CTRL_GO (0x00000200)
#define DP_AUX_TRANS_CTRL_NO_SEND_ADDR (0x00000400)
#define DP_AUX_TRANS_CTRL_NO_SEND_STOP (0x00000800)
#define REG_DP_TIMEOUT_COUNT (0x0000003C)
#define REG_DP_AUX_LIMITS (0x00000040)
#define REG_DP_AUX_STATUS (0x00000044)
#define DP_DPCD_CP_IRQ (0x201)
#define DP_DPCD_RXSTATUS (0x69493)
#define DP_INTERRUPT_TRANS_NUM (0x000000A0)
#define REG_DP_MAINLINK_CTRL (0x00000000)
#define DP_MAINLINK_CTRL_ENABLE (0x00000001)
#define DP_MAINLINK_CTRL_RESET (0x00000002)
#define DP_MAINLINK_CTRL_SW_BYPASS_SCRAMBLER (0x00000010)
#define DP_MAINLINK_FB_BOUNDARY_SEL (0x02000000)
#define REG_DP_STATE_CTRL (0x00000004)
#define DP_STATE_CTRL_LINK_TRAINING_PATTERN1 (0x00000001)
#define DP_STATE_CTRL_LINK_TRAINING_PATTERN2 (0x00000002)
#define DP_STATE_CTRL_LINK_TRAINING_PATTERN3 (0x00000004)
#define DP_STATE_CTRL_LINK_TRAINING_PATTERN4 (0x00000008)
#define DP_STATE_CTRL_LINK_SYMBOL_ERR_MEASURE (0x00000010)
#define DP_STATE_CTRL_LINK_PRBS7 (0x00000020)
#define DP_STATE_CTRL_LINK_TEST_CUSTOM_PATTERN (0x00000040)
#define DP_STATE_CTRL_SEND_VIDEO (0x00000080)
#define DP_STATE_CTRL_PUSH_IDLE (0x00000100)
#define REG_DP_CONFIGURATION_CTRL (0x00000008)
#define DP_CONFIGURATION_CTRL_SYNC_ASYNC_CLK (0x00000001)
#define DP_CONFIGURATION_CTRL_STATIC_DYNAMIC_CN (0x00000002)
#define DP_CONFIGURATION_CTRL_P_INTERLACED (0x00000004)
#define DP_CONFIGURATION_CTRL_INTERLACED_BTF (0x00000008)
#define DP_CONFIGURATION_CTRL_NUM_OF_LANES (0x00000010)
#define DP_CONFIGURATION_CTRL_ENHANCED_FRAMING (0x00000040)
#define DP_CONFIGURATION_CTRL_SEND_VSC (0x00000080)
#define DP_CONFIGURATION_CTRL_BPC (0x00000100)
#define DP_CONFIGURATION_CTRL_ASSR (0x00000400)
#define DP_CONFIGURATION_CTRL_RGB_YUV (0x00000800)
#define DP_CONFIGURATION_CTRL_LSCLK_DIV (0x00002000)
#define DP_CONFIGURATION_CTRL_NUM_OF_LANES_SHIFT (0x04)
#define DP_CONFIGURATION_CTRL_BPC_SHIFT (0x08)
#define DP_CONFIGURATION_CTRL_LSCLK_DIV_SHIFT (0x0D)
#define REG_DP_SOFTWARE_MVID (0x00000010)
#define REG_DP_SOFTWARE_NVID (0x00000018)
#define REG_DP_TOTAL_HOR_VER (0x0000001C)
#define REG_DP_START_HOR_VER_FROM_SYNC (0x00000020)
#define REG_DP_HSYNC_VSYNC_WIDTH_POLARITY (0x00000024)
#define REG_DP_ACTIVE_HOR_VER (0x00000028)
#define REG_DP_MISC1_MISC0 (0x0000002C)
#define DP_MISC0_SYNCHRONOUS_CLK (0x00000001)
#define DP_MISC0_COLORIMETRY_CFG_SHIFT (0x00000001)
#define DP_MISC0_TEST_BITS_DEPTH_SHIFT (0x00000005)
#define REG_DP_VALID_BOUNDARY (0x00000030)
#define REG_DP_VALID_BOUNDARY_2 (0x00000034)
#define REG_DP_LOGICAL2PHYSICAL_LANE_MAPPING (0x00000038)
#define LANE0_MAPPING_SHIFT (0x00000000)
#define LANE1_MAPPING_SHIFT (0x00000002)
#define LANE2_MAPPING_SHIFT (0x00000004)
#define LANE3_MAPPING_SHIFT (0x00000006)
#define REG_DP_MAINLINK_READY (0x00000040)
#define DP_MAINLINK_READY_FOR_VIDEO (0x00000001)
#define DP_MAINLINK_READY_LINK_TRAINING_SHIFT (0x00000003)
#define REG_DP_MAINLINK_LEVELS (0x00000044)
#define DP_MAINLINK_SAFE_TO_EXIT_LEVEL_2 (0x00000002)
#define REG_DP_TU (0x0000004C)
#define REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET (0x00000054)
#define DP_HBR2_ERM_PATTERN (0x00010000)
#define REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG0 (0x000000C0)
#define REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG1 (0x000000C4)
#define REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG2 (0x000000C8)
#define MMSS_DP_MISC1_MISC0 (0x0000002C)
#define MMSS_DP_AUDIO_TIMING_GEN (0x00000080)
#define MMSS_DP_AUDIO_TIMING_RBR_32 (0x00000084)
#define MMSS_DP_AUDIO_TIMING_HBR_32 (0x00000088)
#define MMSS_DP_AUDIO_TIMING_RBR_44 (0x0000008C)
#define MMSS_DP_AUDIO_TIMING_HBR_44 (0x00000090)
#define MMSS_DP_AUDIO_TIMING_RBR_48 (0x00000094)
#define MMSS_DP_AUDIO_TIMING_HBR_48 (0x00000098)
#define MMSS_DP_PSR_CRC_RG (0x00000154)
#define MMSS_DP_PSR_CRC_B (0x00000158)
#define REG_DP_COMPRESSION_MODE_CTRL (0x00000180)
#define MMSS_DP_AUDIO_CFG (0x00000200)
#define MMSS_DP_AUDIO_STATUS (0x00000204)
#define MMSS_DP_AUDIO_PKT_CTRL (0x00000208)
#define MMSS_DP_AUDIO_PKT_CTRL2 (0x0000020C)
#define MMSS_DP_AUDIO_ACR_CTRL (0x00000210)
#define MMSS_DP_AUDIO_CTRL_RESET (0x00000214)
#define MMSS_DP_SDP_CFG (0x00000228)
#define MMSS_DP_SDP_CFG2 (0x0000022C)
#define MMSS_DP_AUDIO_TIMESTAMP_0 (0x00000230)
#define MMSS_DP_AUDIO_TIMESTAMP_1 (0x00000234)
#define MMSS_DP_AUDIO_STREAM_0 (0x00000240)
#define MMSS_DP_AUDIO_STREAM_1 (0x00000244)
#define MMSS_DP_EXTENSION_0 (0x00000250)
#define MMSS_DP_EXTENSION_1 (0x00000254)
#define MMSS_DP_EXTENSION_2 (0x00000258)
#define MMSS_DP_EXTENSION_3 (0x0000025C)
#define MMSS_DP_EXTENSION_4 (0x00000260)
#define MMSS_DP_EXTENSION_5 (0x00000264)
#define MMSS_DP_EXTENSION_6 (0x00000268)
#define MMSS_DP_EXTENSION_7 (0x0000026C)
#define MMSS_DP_EXTENSION_8 (0x00000270)
#define MMSS_DP_EXTENSION_9 (0x00000274)
#define MMSS_DP_AUDIO_COPYMANAGEMENT_0 (0x00000278)
#define MMSS_DP_AUDIO_COPYMANAGEMENT_1 (0x0000027C)
#define MMSS_DP_AUDIO_COPYMANAGEMENT_2 (0x00000280)
#define MMSS_DP_AUDIO_COPYMANAGEMENT_3 (0x00000284)
#define MMSS_DP_AUDIO_COPYMANAGEMENT_4 (0x00000288)
#define MMSS_DP_AUDIO_COPYMANAGEMENT_5 (0x0000028C)
#define MMSS_DP_AUDIO_ISRC_0 (0x00000290)
#define MMSS_DP_AUDIO_ISRC_1 (0x00000294)
#define MMSS_DP_AUDIO_ISRC_2 (0x00000298)
#define MMSS_DP_AUDIO_ISRC_3 (0x0000029C)
#define MMSS_DP_AUDIO_ISRC_4 (0x000002A0)
#define MMSS_DP_AUDIO_ISRC_5 (0x000002A4)
#define MMSS_DP_AUDIO_INFOFRAME_0 (0x000002A8)
#define MMSS_DP_AUDIO_INFOFRAME_1 (0x000002AC)
#define MMSS_DP_AUDIO_INFOFRAME_2 (0x000002B0)
#define MMSS_DP_GENERIC0_0 (0x00000300)
#define MMSS_DP_GENERIC0_1 (0x00000304)
#define MMSS_DP_GENERIC0_2 (0x00000308)
#define MMSS_DP_GENERIC0_3 (0x0000030C)
#define MMSS_DP_GENERIC0_4 (0x00000310)
#define MMSS_DP_GENERIC0_5 (0x00000314)
#define MMSS_DP_GENERIC0_6 (0x00000318)
#define MMSS_DP_GENERIC0_7 (0x0000031C)
#define MMSS_DP_GENERIC0_8 (0x00000320)
#define MMSS_DP_GENERIC0_9 (0x00000324)
#define MMSS_DP_GENERIC1_0 (0x00000328)
#define MMSS_DP_GENERIC1_1 (0x0000032C)
#define MMSS_DP_GENERIC1_2 (0x00000330)
#define MMSS_DP_GENERIC1_3 (0x00000334)
#define MMSS_DP_GENERIC1_4 (0x00000338)
#define MMSS_DP_GENERIC1_5 (0x0000033C)
#define MMSS_DP_GENERIC1_6 (0x00000340)
#define MMSS_DP_GENERIC1_7 (0x00000344)
#define MMSS_DP_GENERIC1_8 (0x00000348)
#define MMSS_DP_GENERIC1_9 (0x0000034C)
#define MMSS_DP_VSCEXT_0 (0x000002D0)
#define MMSS_DP_VSCEXT_1 (0x000002D4)
#define MMSS_DP_VSCEXT_2 (0x000002D8)
#define MMSS_DP_VSCEXT_3 (0x000002DC)
#define MMSS_DP_VSCEXT_4 (0x000002E0)
#define MMSS_DP_VSCEXT_5 (0x000002E4)
#define MMSS_DP_VSCEXT_6 (0x000002E8)
#define MMSS_DP_VSCEXT_7 (0x000002EC)
#define MMSS_DP_VSCEXT_8 (0x000002F0)
#define MMSS_DP_VSCEXT_9 (0x000002F4)
#define MMSS_DP_BIST_ENABLE (0x00000000)
#define DP_BIST_ENABLE_DPBIST_EN (0x00000001)
#define MMSS_DP_TIMING_ENGINE_EN (0x00000010)
#define DP_TIMING_ENGINE_EN_EN (0x00000001)
#define MMSS_DP_INTF_CONFIG (0x00000014)
#define MMSS_DP_INTF_HSYNC_CTL (0x00000018)
#define MMSS_DP_INTF_VSYNC_PERIOD_F0 (0x0000001C)
#define MMSS_DP_INTF_VSYNC_PERIOD_F1 (0x00000020)
#define MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F0 (0x00000024)
#define MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F1 (0x00000028)
#define MMSS_INTF_DISPLAY_V_START_F0 (0x0000002C)
#define MMSS_INTF_DISPLAY_V_START_F1 (0x00000030)
#define MMSS_DP_INTF_DISPLAY_V_END_F0 (0x00000034)
#define MMSS_DP_INTF_DISPLAY_V_END_F1 (0x00000038)
#define MMSS_DP_INTF_ACTIVE_V_START_F0 (0x0000003C)
#define MMSS_DP_INTF_ACTIVE_V_START_F1 (0x00000040)
#define MMSS_DP_INTF_ACTIVE_V_END_F0 (0x00000044)
#define MMSS_DP_INTF_ACTIVE_V_END_F1 (0x00000048)
#define MMSS_DP_INTF_DISPLAY_HCTL (0x0000004C)
#define MMSS_DP_INTF_ACTIVE_HCTL (0x00000050)
#define MMSS_DP_INTF_POLARITY_CTL (0x00000058)
#define MMSS_DP_TPG_MAIN_CONTROL (0x00000060)
#define MMSS_DP_DSC_DTO (0x0000007C)
#define DP_TPG_CHECKERED_RECT_PATTERN (0x00000100)
#define MMSS_DP_TPG_VIDEO_CONFIG (0x00000064)
#define DP_TPG_VIDEO_CONFIG_BPP_8BIT (0x00000001)
#define DP_TPG_VIDEO_CONFIG_RGB (0x00000004)
#define MMSS_DP_ASYNC_FIFO_CONFIG (0x00000088)
#define REG_DP_PHY_AUX_INTERRUPT_CLEAR (0x0000004C)
#define REG_DP_PHY_AUX_BIST_CFG (0x00000050)
#define REG_DP_PHY_AUX_INTERRUPT_STATUS (0x000000BC)
/* DP HDCP 1.3 registers */
#define DP_HDCP_CTRL (0x0A0)
#define DP_HDCP_STATUS (0x0A4)
#define DP_HDCP_SW_UPPER_AKSV (0x098)
#define DP_HDCP_SW_LOWER_AKSV (0x09C)
#define DP_HDCP_ENTROPY_CTRL0 (0x350)
#define DP_HDCP_ENTROPY_CTRL1 (0x35C)
#define DP_HDCP_SHA_STATUS (0x0C8)
#define DP_HDCP_RCVPORT_DATA2_0 (0x0B0)
#define DP_HDCP_RCVPORT_DATA3 (0x0A4)
#define DP_HDCP_RCVPORT_DATA4 (0x0A8)
#define DP_HDCP_RCVPORT_DATA5 (0x0C0)
#define DP_HDCP_RCVPORT_DATA6 (0x0C4)
#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_SHA_CTRL (0x024)
#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_SHA_DATA (0x028)
#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA0 (0x004)
#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA1 (0x008)
#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA7 (0x00C)
#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA8 (0x010)
#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA9 (0x014)
#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA10 (0x018)
#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA11 (0x01C)
#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA12 (0x020)
#endif /* _DP_REG_H_ */
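As an illustration of how the AUX data fields above fit together (a sketch only; the helper below is hypothetical and not part of this series): a payload byte occupies bits [15:8] per DP_AUX_DATA_OFFSET and DP_AUX_DATA_MASK, while DP_AUX_DATA_INDEX_WRITE restarts the hardware's internal data index on the first word of a transfer.

static u32 dp_aux_data_word(u8 byte, bool first, bool read)
{
        u32 reg = ((u32)byte << DP_AUX_DATA_OFFSET) & DP_AUX_DATA_MASK;

        if (first)
                reg |= DP_AUX_DATA_INDEX_WRITE; /* restart internal index */
        if (read)
                reg |= DP_AUX_DATA_READ;

        return reg;
}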


@ -30,6 +30,8 @@ enum msm_dsi_phy_type {
MSM_DSI_PHY_28NM_8960,
MSM_DSI_PHY_14NM,
MSM_DSI_PHY_10NM,
MSM_DSI_PHY_7NM,
MSM_DSI_PHY_7NM_V4_1,
MSM_DSI_PHY_MAX
};


@ -1886,5 +1886,428 @@ static inline uint32_t REG_DSI_10nm_PHY_LN_TX_DCTRL(uint32_t i0) { return 0x0000
#define REG_DSI_10nm_PHY_PLL_COMMON_STATUS_ONE 0x000001a0
#define REG_DSI_7nm_PHY_CMN_REVISION_ID0 0x00000000
#define REG_DSI_7nm_PHY_CMN_REVISION_ID1 0x00000004
#define REG_DSI_7nm_PHY_CMN_REVISION_ID2 0x00000008
#define REG_DSI_7nm_PHY_CMN_REVISION_ID3 0x0000000c
#define REG_DSI_7nm_PHY_CMN_CLK_CFG0 0x00000010
#define REG_DSI_7nm_PHY_CMN_CLK_CFG1 0x00000014
#define REG_DSI_7nm_PHY_CMN_GLBL_CTRL 0x00000018
#define REG_DSI_7nm_PHY_CMN_RBUF_CTRL 0x0000001c
#define REG_DSI_7nm_PHY_CMN_VREG_CTRL_0 0x00000020
#define REG_DSI_7nm_PHY_CMN_CTRL_0 0x00000024
#define REG_DSI_7nm_PHY_CMN_CTRL_1 0x00000028
#define REG_DSI_7nm_PHY_CMN_CTRL_2 0x0000002c
#define REG_DSI_7nm_PHY_CMN_CTRL_3 0x00000030
#define REG_DSI_7nm_PHY_CMN_LANE_CFG0 0x00000034
#define REG_DSI_7nm_PHY_CMN_LANE_CFG1 0x00000038
#define REG_DSI_7nm_PHY_CMN_PLL_CNTRL 0x0000003c
#define REG_DSI_7nm_PHY_CMN_DPHY_SOT 0x00000040
#define REG_DSI_7nm_PHY_CMN_LANE_CTRL0 0x000000a0
#define REG_DSI_7nm_PHY_CMN_LANE_CTRL1 0x000000a4
#define REG_DSI_7nm_PHY_CMN_LANE_CTRL2 0x000000a8
#define REG_DSI_7nm_PHY_CMN_LANE_CTRL3 0x000000ac
#define REG_DSI_7nm_PHY_CMN_LANE_CTRL4 0x000000b0
#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_0 0x000000b4
#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_1 0x000000b8
#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_2 0x000000bc
#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_3 0x000000c0
#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_4 0x000000c4
#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_5 0x000000c8
#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_6 0x000000cc
#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_7 0x000000d0
#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_8 0x000000d4
#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_9 0x000000d8
#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_10 0x000000dc
#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_11 0x000000e0
#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_12 0x000000e4
#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_13 0x000000e8
#define REG_DSI_7nm_PHY_CMN_GLBL_HSTX_STR_CTRL_0 0x000000ec
#define REG_DSI_7nm_PHY_CMN_GLBL_HSTX_STR_CTRL_1 0x000000f0
#define REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_TOP_CTRL 0x000000f4
#define REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_BOT_CTRL 0x000000f8
#define REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_MID_CTRL 0x000000fc
#define REG_DSI_7nm_PHY_CMN_GLBL_LPTX_STR_CTRL 0x00000100
#define REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_0 0x00000104
#define REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_1 0x00000108
#define REG_DSI_7nm_PHY_CMN_GLBL_STR_SWI_CAL_SEL_CTRL 0x0000010c
#define REG_DSI_7nm_PHY_CMN_VREG_CTRL_1 0x00000110
#define REG_DSI_7nm_PHY_CMN_CTRL_4 0x00000114
#define REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE4 0x00000128
#define REG_DSI_7nm_PHY_CMN_PHY_STATUS 0x00000140
#define REG_DSI_7nm_PHY_CMN_LANE_STATUS0 0x00000148
#define REG_DSI_7nm_PHY_CMN_LANE_STATUS1 0x0000014c
static inline uint32_t REG_DSI_7nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x80*i0; }
static inline uint32_t REG_DSI_7nm_PHY_LN_CFG0(uint32_t i0) { return 0x00000000 + 0x80*i0; }
static inline uint32_t REG_DSI_7nm_PHY_LN_CFG1(uint32_t i0) { return 0x00000004 + 0x80*i0; }
static inline uint32_t REG_DSI_7nm_PHY_LN_CFG2(uint32_t i0) { return 0x00000008 + 0x80*i0; }
static inline uint32_t REG_DSI_7nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x0000000c + 0x80*i0; }
static inline uint32_t REG_DSI_7nm_PHY_LN_PIN_SWAP(uint32_t i0) { return 0x00000010 + 0x80*i0; }
static inline uint32_t REG_DSI_7nm_PHY_LN_LPRX_CTRL(uint32_t i0) { return 0x00000014 + 0x80*i0; }
static inline uint32_t REG_DSI_7nm_PHY_LN_TX_DCTRL(uint32_t i0) { return 0x00000018 + 0x80*i0; }
#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_ONE 0x00000000
#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_TWO 0x00000004
#define REG_DSI_7nm_PHY_PLL_INT_LOOP_SETTINGS 0x00000008
#define REG_DSI_7nm_PHY_PLL_INT_LOOP_SETTINGS_TWO 0x0000000c
#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_THREE 0x00000010
#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FOUR 0x00000014
#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE 0x00000018
#define REG_DSI_7nm_PHY_PLL_INT_LOOP_CONTROLS 0x0000001c
#define REG_DSI_7nm_PHY_PLL_DSM_DIVIDER 0x00000020
#define REG_DSI_7nm_PHY_PLL_FEEDBACK_DIVIDER 0x00000024
#define REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES 0x00000028
#define REG_DSI_7nm_PHY_PLL_FREQ_UPDATE_CONTROL_OVERRIDES 0x0000002c
#define REG_DSI_7nm_PHY_PLL_CMODE 0x00000030
#define REG_DSI_7nm_PHY_PLL_PSM_CTRL 0x00000034
#define REG_DSI_7nm_PHY_PLL_RSM_CTRL 0x00000038
#define REG_DSI_7nm_PHY_PLL_VCO_TUNE_MAP 0x0000003c
#define REG_DSI_7nm_PHY_PLL_PLL_CNTRL 0x00000040
#define REG_DSI_7nm_PHY_PLL_CALIBRATION_SETTINGS 0x00000044
#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_TIMER_LOW 0x00000048
#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_TIMER_HIGH 0x0000004c
#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS 0x00000050
#define REG_DSI_7nm_PHY_PLL_BAND_SEL_MIN 0x00000054
#define REG_DSI_7nm_PHY_PLL_BAND_SEL_MAX 0x00000058
#define REG_DSI_7nm_PHY_PLL_BAND_SEL_PFILT 0x0000005c
#define REG_DSI_7nm_PHY_PLL_BAND_SEL_IFILT 0x00000060
#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_TWO 0x00000064
#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE 0x00000068
#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_FOUR 0x0000006c
#define REG_DSI_7nm_PHY_PLL_BAND_SEL_ICODE_HIGH 0x00000070
#define REG_DSI_7nm_PHY_PLL_BAND_SEL_ICODE_LOW 0x00000074
#define REG_DSI_7nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE 0x00000078
#define REG_DSI_7nm_PHY_PLL_FREQ_DETECT_THRESH 0x0000007c
#define REG_DSI_7nm_PHY_PLL_FREQ_DET_REFCLK_HIGH 0x00000080
#define REG_DSI_7nm_PHY_PLL_FREQ_DET_REFCLK_LOW 0x00000084
#define REG_DSI_7nm_PHY_PLL_FREQ_DET_PLLCLK_HIGH 0x00000088
#define REG_DSI_7nm_PHY_PLL_FREQ_DET_PLLCLK_LOW 0x0000008c
#define REG_DSI_7nm_PHY_PLL_PFILT 0x00000090
#define REG_DSI_7nm_PHY_PLL_IFILT 0x00000094
#define REG_DSI_7nm_PHY_PLL_PLL_GAIN 0x00000098
#define REG_DSI_7nm_PHY_PLL_ICODE_LOW 0x0000009c
#define REG_DSI_7nm_PHY_PLL_ICODE_HIGH 0x000000a0
#define REG_DSI_7nm_PHY_PLL_LOCKDET 0x000000a4
#define REG_DSI_7nm_PHY_PLL_OUTDIV 0x000000a8
#define REG_DSI_7nm_PHY_PLL_FASTLOCK_CONTROL 0x000000ac
#define REG_DSI_7nm_PHY_PLL_PASS_OUT_OVERRIDE_ONE 0x000000b0
#define REG_DSI_7nm_PHY_PLL_PASS_OUT_OVERRIDE_TWO 0x000000b4
#define REG_DSI_7nm_PHY_PLL_CORE_OVERRIDE 0x000000b8
#define REG_DSI_7nm_PHY_PLL_CORE_INPUT_OVERRIDE 0x000000bc
#define REG_DSI_7nm_PHY_PLL_RATE_CHANGE 0x000000c0
#define REG_DSI_7nm_PHY_PLL_PLL_DIGITAL_TIMERS 0x000000c4
#define REG_DSI_7nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO 0x000000c8
#define REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START 0x000000cc
#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW 0x000000d0
#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID 0x000000d4
#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH 0x000000d8
#define REG_DSI_7nm_PHY_PLL_DEC_FRAC_MUXES 0x000000dc
#define REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1 0x000000e0
#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1 0x000000e4
#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1 0x000000e8
#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1 0x000000ec
#define REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_2 0x000000f0
#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_2 0x000000f4
#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_2 0x000000f8
#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_2 0x000000fc
#define REG_DSI_7nm_PHY_PLL_MASH_CONTROL 0x00000100
#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW 0x00000104
#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH 0x00000108
#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW 0x0000010c
#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH 0x00000110
#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW 0x00000114
#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH 0x00000118
#define REG_DSI_7nm_PHY_PLL_SSC_MUX_CONTROL 0x0000011c
#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW_1 0x00000120
#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH_1 0x00000124
#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW_1 0x00000128
#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH_1 0x0000012c
#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW_1 0x00000130
#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH_1 0x00000134
#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW_2 0x00000138
#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH_2 0x0000013c
#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW_2 0x00000140
#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH_2 0x00000144
#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW_2 0x00000148
#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH_2 0x0000014c
#define REG_DSI_7nm_PHY_PLL_SSC_CONTROL 0x00000150
#define REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE 0x00000154
#define REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1 0x00000158
#define REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_2 0x0000015c
#define REG_DSI_7nm_PHY_PLL_PLL_PROP_GAIN_RATE_1 0x00000160
#define REG_DSI_7nm_PHY_PLL_PLL_PROP_GAIN_RATE_2 0x00000164
#define REG_DSI_7nm_PHY_PLL_PLL_BAND_SEL_RATE_1 0x00000168
#define REG_DSI_7nm_PHY_PLL_PLL_BAND_SEL_RATE_2 0x0000016c
#define REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1 0x00000170
#define REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_2 0x00000174
#define REG_DSI_7nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1 0x00000178
#define REG_DSI_7nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_2 0x0000017c
#define REG_DSI_7nm_PHY_PLL_PLL_FASTLOCK_EN_BAND 0x00000180
#define REG_DSI_7nm_PHY_PLL_FREQ_TUNE_ACCUM_INIT_MID 0x00000184
#define REG_DSI_7nm_PHY_PLL_FREQ_TUNE_ACCUM_INIT_HIGH 0x00000188
#define REG_DSI_7nm_PHY_PLL_FREQ_TUNE_ACCUM_INIT_MUX 0x0000018c
#define REG_DSI_7nm_PHY_PLL_PLL_LOCK_OVERRIDE 0x00000190
#define REG_DSI_7nm_PHY_PLL_PLL_LOCK_DELAY 0x00000194
#define REG_DSI_7nm_PHY_PLL_PLL_LOCK_MIN_DELAY 0x00000198
#define REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS 0x0000019c
#define REG_DSI_7nm_PHY_PLL_SPARE_AND_JPC_OVERRIDES 0x000001a0
#define REG_DSI_7nm_PHY_PLL_BIAS_CONTROL_1 0x000001a4
#define REG_DSI_7nm_PHY_PLL_BIAS_CONTROL_2 0x000001a8
#define REG_DSI_7nm_PHY_PLL_ALOG_OBSV_BUS_CTRL_1 0x000001ac
#define REG_DSI_7nm_PHY_PLL_COMMON_STATUS_ONE 0x000001b0
#define REG_DSI_7nm_PHY_PLL_COMMON_STATUS_TWO 0x000001b4
#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL 0x000001b8
#define REG_DSI_7nm_PHY_PLL_ICODE_ACCUM_STATUS_LOW 0x000001bc
#define REG_DSI_7nm_PHY_PLL_ICODE_ACCUM_STATUS_HIGH 0x000001c0
#define REG_DSI_7nm_PHY_PLL_FD_OUT_LOW 0x000001c4
#define REG_DSI_7nm_PHY_PLL_FD_OUT_HIGH 0x000001c8
#define REG_DSI_7nm_PHY_PLL_ALOG_OBSV_BUS_STATUS_1 0x000001cc
#define REG_DSI_7nm_PHY_PLL_PLL_MISC_CONFIG 0x000001d0
#define REG_DSI_7nm_PHY_PLL_FLL_CONFIG 0x000001d4
#define REG_DSI_7nm_PHY_PLL_FLL_FREQ_ACQ_TIME 0x000001d8
#define REG_DSI_7nm_PHY_PLL_FLL_CODE0 0x000001dc
#define REG_DSI_7nm_PHY_PLL_FLL_CODE1 0x000001e0
#define REG_DSI_7nm_PHY_PLL_FLL_GAIN0 0x000001e4
#define REG_DSI_7nm_PHY_PLL_FLL_GAIN1 0x000001e8
#define REG_DSI_7nm_PHY_PLL_SW_RESET 0x000001ec
#define REG_DSI_7nm_PHY_PLL_FAST_PWRUP 0x000001f0
#define REG_DSI_7nm_PHY_PLL_LOCKTIME0 0x000001f4
#define REG_DSI_7nm_PHY_PLL_LOCKTIME1 0x000001f8
#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS_SEL 0x000001fc
#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS0 0x00000200
#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS1 0x00000204
#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS2 0x00000208
#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS3 0x0000020c
#define REG_DSI_7nm_PHY_PLL_ANALOG_FLL_CONTROL_OVERRIDES 0x00000210
#define REG_DSI_7nm_PHY_PLL_VCO_CONFIG 0x00000214
#define REG_DSI_7nm_PHY_PLL_VCO_CAL_CODE1_MODE0_STATUS 0x00000218
#define REG_DSI_7nm_PHY_PLL_VCO_CAL_CODE1_MODE1_STATUS 0x0000021c
#define REG_DSI_7nm_PHY_PLL_RESET_SM_STATUS 0x00000220
#define REG_DSI_7nm_PHY_PLL_TDC_OFFSET 0x00000224
#define REG_DSI_7nm_PHY_PLL_PS3_PWRDOWN_CONTROLS 0x00000228
#define REG_DSI_7nm_PHY_PLL_PS4_PWRDOWN_CONTROLS 0x0000022c
#define REG_DSI_7nm_PHY_PLL_PLL_RST_CONTROLS 0x00000230
#define REG_DSI_7nm_PHY_PLL_GEAR_BAND_SELECT_CONTROLS 0x00000234
#define REG_DSI_7nm_PHY_PLL_PSM_CLK_CONTROLS 0x00000238
#define REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES_2 0x0000023c
#define REG_DSI_7nm_PHY_PLL_VCO_CONFIG_1 0x00000240
#define REG_DSI_7nm_PHY_PLL_VCO_CONFIG_2 0x00000244
#define REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS_1 0x00000248
#define REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS_2 0x0000024c
#define REG_DSI_7nm_PHY_PLL_CMODE_1 0x00000250
#define REG_DSI_7nm_PHY_PLL_CMODE_2 0x00000254
#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE_1 0x00000258
#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE_2 0x0000025c
#define REG_DSI_7nm_PHY_PLL_PERF_OPTIMIZE 0x00000260
#endif /* DSI_XML */


@ -265,9 +265,12 @@ static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
&msm8998_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_1,
&sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_3_0,
&sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_4_0,
&sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_4_1,
&sc7180_dsi_cfg, &msm_dsi_6g_v2_host_ops},
};
const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor)


@ -21,6 +21,8 @@
#define MSM_DSI_6G_VER_MINOR_V2_1_0 0x20010000
#define MSM_DSI_6G_VER_MINOR_V2_2_0 0x20000000
#define MSM_DSI_6G_VER_MINOR_V2_2_1 0x20020001
#define MSM_DSI_6G_VER_MINOR_V2_3_0 0x20030000
#define MSM_DSI_6G_VER_MINOR_V2_4_0 0x20040000
#define MSM_DSI_6G_VER_MINOR_V2_4_1 0x20040001
#define MSM_DSI_V2_VER_MINOR_8064 0x0


@ -364,6 +364,102 @@ int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
return 0;
}
int msm_dsi_dphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing,
struct msm_dsi_phy_clk_request *clk_req)
{
const unsigned long bit_rate = clk_req->bitclk_rate;
const unsigned long esc_rate = clk_req->escclk_rate;
s32 ui, ui_x8;
s32 tmax, tmin;
s32 pcnt_clk_prep = 50;
s32 pcnt_clk_zero = 2;
s32 pcnt_clk_trail = 30;
s32 pcnt_hs_prep = 50;
s32 pcnt_hs_zero = 10;
s32 pcnt_hs_trail = 30;
s32 pcnt_hs_exit = 10;
s32 coeff = 1000; /* Precision, should avoid overflow */
s32 hb_en;
s32 temp;
if (!bit_rate || !esc_rate)
return -EINVAL;
hb_en = 0;
ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
ui_x8 = ui << 3;
/* TODO: verify these calculations against the latest downstream driver.
* Everything except clk_post/clk_pre uses the v3 calculations, since the
* downstream driver uses the same calculations for v3 and v4.
*/
temp = S_DIV_ROUND_UP(38 * coeff, ui_x8);
tmin = max_t(s32, temp, 0);
temp = (95 * coeff) / ui_x8;
tmax = max_t(s32, temp, 0);
timing->clk_prepare = linear_inter(tmax, tmin, pcnt_clk_prep, 0, false);
temp = 300 * coeff - (timing->clk_prepare << 3) * ui;
tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
tmax = (tmin > 255) ? 511 : 255;
timing->clk_zero = linear_inter(tmax, tmin, pcnt_clk_zero, 0, false);
tmin = DIV_ROUND_UP(60 * coeff + 3 * ui, ui_x8);
temp = 105 * coeff + 12 * ui - 20 * coeff;
tmax = (temp + 3 * ui) / ui_x8;
timing->clk_trail = linear_inter(tmax, tmin, pcnt_clk_trail, 0, false);
temp = S_DIV_ROUND_UP(40 * coeff + 4 * ui, ui_x8);
tmin = max_t(s32, temp, 0);
temp = (85 * coeff + 6 * ui) / ui_x8;
tmax = max_t(s32, temp, 0);
timing->hs_prepare = linear_inter(tmax, tmin, pcnt_hs_prep, 0, false);
temp = 145 * coeff + 10 * ui - (timing->hs_prepare << 3) * ui;
tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
tmax = 255;
timing->hs_zero = linear_inter(tmax, tmin, pcnt_hs_zero, 0, false);
tmin = DIV_ROUND_UP(60 * coeff + 4 * ui, ui_x8) - 1;
temp = 105 * coeff + 12 * ui - 20 * coeff;
tmax = (temp / ui_x8) - 1;
timing->hs_trail = linear_inter(tmax, tmin, pcnt_hs_trail, 0, false);
temp = 50 * coeff + ((hb_en << 2) - 8) * ui;
timing->hs_rqst = S_DIV_ROUND_UP(temp, ui_x8);
tmin = DIV_ROUND_UP(100 * coeff, ui_x8) - 1;
tmax = 255;
timing->hs_exit = linear_inter(tmax, tmin, pcnt_hs_exit, 0, false);
/* recommended min
* = roundup((mipi_min_ns + t_hs_trail_ns)/(16*bit_clk_ns), 0) - 1
*/
temp = 60 * coeff + 52 * ui + (timing->hs_trail + 1) * ui_x8;
tmin = DIV_ROUND_UP(temp, 16 * ui) - 1;
tmax = 255;
timing->shared_timings.clk_post = linear_inter(tmax, tmin, 5, 0, false);
/* recommended min
* val1 = (tlpx_ns + clk_prepare_ns + clk_zero_ns + hs_rqst_ns)
* val2 = (16 * bit_clk_ns)
* final = roundup(val1/val2, 0) - 1
*/
temp = 52 * coeff + (timing->clk_prepare + timing->clk_zero + 1) * ui_x8 + 54 * coeff;
tmin = DIV_ROUND_UP(temp, 16 * ui) - 1;
tmax = 255;
timing->shared_timings.clk_pre = DIV_ROUND_UP((tmax - tmin) * 125, 10000) + tmin;
DBG("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
timing->clk_zero, timing->clk_trail, timing->clk_prepare, timing->hs_exit,
timing->hs_zero, timing->hs_prepare, timing->hs_trail, timing->hs_rqst);
return 0;
}
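/*
* Worked example (hypothetical rate): for bitclk_rate = 1500000000,
*   ui = mult_frac(NSEC_PER_MSEC, 1000, 1500000000 / 1000)
*      = 1000000 * 1000 / 1500000 = 666
* i.e. one unit interval of ~0.666 ns expressed in the coeff = 1000
* fixed-point domain, with ui_x8 = 5328 for eight UI. All of the
* min/max windows above are computed in that same domain before
* linear_inter() picks a point at the requested percentage.
*/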
void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
u32 bit_mask)
{
@ -507,6 +603,12 @@ static const struct of_device_id dsi_phy_dt_match[] = {
.data = &dsi_phy_10nm_cfgs },
{ .compatible = "qcom,dsi-phy-10nm-8998",
.data = &dsi_phy_10nm_8998_cfgs },
#endif
#ifdef CONFIG_DRM_MSM_DSI_7NM_PHY
{ .compatible = "qcom,dsi-phy-7nm",
.data = &dsi_phy_7nm_cfgs },
{ .compatible = "qcom,dsi-phy-7nm-8150",
.data = &dsi_phy_7nm_8150_cfgs },
#endif
{}
};


@ -48,10 +48,10 @@ extern const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_14nm_660_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_10nm_8998_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_7nm_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_7nm_8150_cfgs;
struct msm_dsi_dphy_timing {
u32 clk_pre;
u32 clk_post;
u32 clk_zero;
u32 clk_trail;
u32 clk_prepare;
@ -102,6 +102,8 @@ int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing,
struct msm_dsi_phy_clk_request *clk_req);
int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
struct msm_dsi_phy_clk_request *clk_req);
int msm_dsi_dphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing,
struct msm_dsi_phy_clk_request *clk_req);
void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
u32 bit_mask);
int msm_dsi_phy_init_common(struct msm_dsi_phy *phy);


@ -0,0 +1,255 @@
/*
* SPDX-License-Identifier: GPL-2.0
* Copyright (c) 2018, The Linux Foundation
*/
#include <linux/iopoll.h>
#include "dsi_phy.h"
#include "dsi.xml.h"
static int dsi_phy_hw_v4_0_is_pll_on(struct msm_dsi_phy *phy)
{
void __iomem *base = phy->base;
u32 data = 0;
data = dsi_phy_read(base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL);
mb(); /* make sure read happened */
return (data & BIT(0));
}
static void dsi_phy_hw_v4_0_config_lpcdrx(struct msm_dsi_phy *phy, bool enable)
{
void __iomem *lane_base = phy->lane_base;
int phy_lane_0 = 0; /* TODO: Support all lane swap configs */
/*
* LPRX and CDRX need to be enabled only for the physical data
* lane corresponding to logical data lane 0
*/
if (enable)
dsi_phy_write(lane_base +
REG_DSI_7nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0x3);
else
dsi_phy_write(lane_base +
REG_DSI_7nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0);
}
static void dsi_phy_hw_v4_0_lane_settings(struct msm_dsi_phy *phy)
{
int i;
const u8 tx_dctrl_0[] = { 0x00, 0x00, 0x00, 0x04, 0x01 };
const u8 tx_dctrl_1[] = { 0x40, 0x40, 0x40, 0x46, 0x41 };
const u8 *tx_dctrl = tx_dctrl_0;
void __iomem *lane_base = phy->lane_base;
if (phy->cfg->type == MSM_DSI_PHY_7NM_V4_1)
tx_dctrl = tx_dctrl_1;
/* Strength ctrl settings */
for (i = 0; i < 5; i++) {
/*
* Disable LPRX and CDRX for all lanes. They are later enabled
* only for the physical data lane corresponding to logical
* data lane 0
*/
dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_LPRX_CTRL(i), 0);
dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_PIN_SWAP(i), 0x0);
}
dsi_phy_hw_v4_0_config_lpcdrx(phy, true);
/* other settings */
for (i = 0; i < 5; i++) {
dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_CFG0(i), 0x0);
dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_CFG1(i), 0x0);
dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_CFG2(i), i == 4 ? 0x8a : 0xa);
dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_TX_DCTRL(i), tx_dctrl[i]);
}
}
static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
struct msm_dsi_phy_clk_request *clk_req)
{
int ret;
u32 status;
u32 const delay_us = 5;
u32 const timeout_us = 1000;
struct msm_dsi_dphy_timing *timing = &phy->timing;
void __iomem *base = phy->base;
bool less_than_1500_mhz;
u32 vreg_ctrl_0, glbl_str_swi_cal_sel_ctrl, glbl_hstx_str_ctrl_0;
u32 glbl_rescode_top_ctrl, glbl_rescode_bot_ctrl;
u32 data;
DBG("");
if (msm_dsi_dphy_timing_calc_v4(timing, clk_req)) {
DRM_DEV_ERROR(&phy->pdev->dev,
"%s: D-PHY timing calculation failed\n", __func__);
return -EINVAL;
}
if (dsi_phy_hw_v4_0_is_pll_on(phy))
pr_warn("PLL turned on before configuring PHY\n");
/* wait for REFGEN READY */
ret = readl_poll_timeout_atomic(base + REG_DSI_7nm_PHY_CMN_PHY_STATUS,
status, (status & BIT(0)),
delay_us, timeout_us);
if (ret) {
pr_err("Ref gen not ready. Aborting\n");
return -EINVAL;
}
/* TODO: CPHY enable path (this is for DPHY only) */
/* Alter PHY configurations if the data rate is less than 1.5 GHz */
less_than_1500_mhz = (clk_req->bitclk_rate <= 1500000000);
if (phy->cfg->type == MSM_DSI_PHY_7NM_V4_1) {
vreg_ctrl_0 = less_than_1500_mhz ? 0x53 : 0x52;
glbl_rescode_top_ctrl = less_than_1500_mhz ? 0x3d : 0x00;
glbl_rescode_bot_ctrl = less_than_1500_mhz ? 0x39 : 0x3c;
glbl_str_swi_cal_sel_ctrl = 0x00;
glbl_hstx_str_ctrl_0 = 0x88;
} else {
vreg_ctrl_0 = less_than_1500_mhz ? 0x5B : 0x59;
glbl_str_swi_cal_sel_ctrl = less_than_1500_mhz ? 0x03 : 0x00;
glbl_hstx_str_ctrl_0 = less_than_1500_mhz ? 0x66 : 0x88;
glbl_rescode_top_ctrl = 0x03;
glbl_rescode_bot_ctrl = 0x3c;
}
/* de-assert digital and pll power down */
data = BIT(6) | BIT(5);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_0, data);
/* Assert PLL core reset */
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL, 0x00);
/* turn off resync FIFO */
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_RBUF_CTRL, 0x00);
/* program CMN_CTRL_4 for minor_ver 2 chipsets */
data = dsi_phy_read(base + REG_DSI_7nm_PHY_CMN_REVISION_ID0);
data = data & (0xf0);
if (data == 0x20)
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_4, 0x04);
/* Configure PHY lane swap (TODO: we need to calculate this) */
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CFG0, 0x21);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CFG1, 0x84);
/* Enable LDO */
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_VREG_CTRL_0, vreg_ctrl_0);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_VREG_CTRL_1, 0x5c);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_3, 0x00);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_STR_SWI_CAL_SEL_CTRL,
glbl_str_swi_cal_sel_ctrl);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_HSTX_STR_CTRL_0,
glbl_hstx_str_ctrl_0);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_0, 0x00);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_TOP_CTRL,
glbl_rescode_top_ctrl);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_BOT_CTRL,
glbl_rescode_bot_ctrl);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_LPTX_STR_CTRL, 0x55);
/* Remove power down from all blocks */
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_0, 0x7f);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CTRL0, 0x1f);
/* Select full-rate mode */
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_2, 0x40);
ret = msm_dsi_pll_set_usecase(phy->pll, phy->usecase);
if (ret) {
DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
__func__, ret);
return ret;
}
/* DSI PHY timings */
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_0, 0x00);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_1, timing->clk_zero);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_2, timing->clk_prepare);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_3, timing->clk_trail);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_4, timing->hs_exit);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_5, timing->hs_zero);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_6, timing->hs_prepare);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_7, timing->hs_trail);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_8, timing->hs_rqst);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_9, 0x02);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_10, 0x04);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_11, 0x00);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_12,
timing->shared_timings.clk_pre);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_13,
timing->shared_timings.clk_post);
/* DSI lane settings */
dsi_phy_hw_v4_0_lane_settings(phy);
DBG("DSI%d PHY enabled", phy->id);
return 0;
}
static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy)
{
/* TODO */
}
static int dsi_7nm_phy_init(struct msm_dsi_phy *phy)
{
struct platform_device *pdev = phy->pdev;
phy->lane_base = msm_ioremap(pdev, "dsi_phy_lane",
"DSI_PHY_LANE");
if (IS_ERR(phy->lane_base)) {
DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy lane base\n",
__func__);
return -ENOMEM;
}
return 0;
}
const struct msm_dsi_phy_cfg dsi_phy_7nm_cfgs = {
.type = MSM_DSI_PHY_7NM_V4_1,
.src_pll_truthtable = { {false, false}, {true, false} },
.reg_cfg = {
.num = 1,
.regs = {
{"vdds", 36000, 32},
},
},
.ops = {
.enable = dsi_7nm_phy_enable,
.disable = dsi_7nm_phy_disable,
.init = dsi_7nm_phy_init,
},
.io_start = { 0xae94400, 0xae96400 },
.num_dsi_phy = 2,
};
const struct msm_dsi_phy_cfg dsi_phy_7nm_8150_cfgs = {
.type = MSM_DSI_PHY_7NM,
.src_pll_truthtable = { {false, false}, {true, false} },
.reg_cfg = {
.num = 1,
.regs = {
{"vdds", 36000, 32},
},
},
.ops = {
.enable = dsi_7nm_phy_enable,
.disable = dsi_7nm_phy_disable,
.init = dsi_7nm_phy_init,
},
.io_start = { 0xae94400, 0xae96400 },
.num_dsi_phy = 2,
};


@ -161,6 +161,10 @@ struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
case MSM_DSI_PHY_10NM:
pll = msm_dsi_pll_10nm_init(pdev, id);
break;
case MSM_DSI_PHY_7NM:
case MSM_DSI_PHY_7NM_V4_1:
pll = msm_dsi_pll_7nm_init(pdev, id);
break;
default:
pll = ERR_PTR(-ENXIO);
break;


@ -116,5 +116,15 @@ msm_dsi_pll_10nm_init(struct platform_device *pdev, int id)
return ERR_PTR(-ENODEV);
}
#endif
#ifdef CONFIG_DRM_MSM_DSI_7NM_PHY
struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev, int id);
#else
static inline struct msm_dsi_pll *
msm_dsi_pll_7nm_init(struct platform_device *pdev, int id)
{
return ERR_PTR(-ENODEV);
}
#endif
#endif /* __DSI_PLL_H__ */


@ -0,0 +1,904 @@
/*
* SPDX-License-Identifier: GPL-2.0
* Copyright (c) 2018, The Linux Foundation
*/
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/iopoll.h>
#include "dsi_pll.h"
#include "dsi.xml.h"
/*
* DSI PLL 7nm - clock diagram (e.g. DSI0). TODO: update diagram for CPHY.
*
* dsi0_pll_out_div_clk dsi0_pll_bit_clk
* | |
* | |
* +---------+ | +----------+ | +----+
* dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0_phy_pll_out_byteclk
* +---------+ | +----------+ | +----+
* | |
* | | dsi0_pll_by_2_bit_clk
* | | |
* | | +----+ | |\ dsi0_pclk_mux
* | |--| /2 |--o--| \ |
* | | +----+ | \ | +---------+
* | --------------| |--o--| div_7_4 |-- dsi0_phy_pll_out_dsiclk
* |------------------------------| / +---------+
* | +-----+ | /
* -----------| /4? |--o----------|/
* +-----+ | |
* | |dsiclk_sel
* |
* dsi0_pll_post_out_div_clk
*/
#define DSI_BYTE_PLL_CLK 0
#define DSI_PIXEL_PLL_CLK 1
#define NUM_PROVIDED_CLKS 2
#define VCO_REF_CLK_RATE 19200000
struct dsi_pll_regs {
u32 pll_prop_gain_rate;
u32 pll_lockdet_rate;
u32 decimal_div_start;
u32 frac_div_start_low;
u32 frac_div_start_mid;
u32 frac_div_start_high;
u32 pll_clock_inverters;
u32 ssc_stepsize_low;
u32 ssc_stepsize_high;
u32 ssc_div_per_low;
u32 ssc_div_per_high;
u32 ssc_adjper_low;
u32 ssc_adjper_high;
u32 ssc_control;
};
struct dsi_pll_config {
u32 ref_freq;
bool div_override;
u32 output_div;
bool ignore_frac;
bool disable_prescaler;
bool enable_ssc;
bool ssc_center;
u32 dec_bits;
u32 frac_bits;
u32 lock_timer;
u32 ssc_freq;
u32 ssc_offset;
u32 ssc_adj_per;
u32 thresh_cycles;
u32 refclk_cycles;
};
struct pll_7nm_cached_state {
unsigned long vco_rate;
u8 bit_clk_div;
u8 pix_clk_div;
u8 pll_out_div;
u8 pll_mux;
};
struct dsi_pll_7nm {
struct msm_dsi_pll base;
int id;
struct platform_device *pdev;
void __iomem *phy_cmn_mmio;
void __iomem *mmio;
u64 vco_ref_clk_rate;
u64 vco_current_rate;
/* protects REG_DSI_7nm_PHY_CMN_CLK_CFG0 register */
spinlock_t postdiv_lock;
int vco_delay;
struct dsi_pll_config pll_configuration;
struct dsi_pll_regs reg_setup;
/* private clocks: */
struct clk_hw *out_div_clk_hw;
struct clk_hw *bit_clk_hw;
struct clk_hw *byte_clk_hw;
struct clk_hw *by_2_bit_clk_hw;
struct clk_hw *post_out_div_clk_hw;
struct clk_hw *pclk_mux_hw;
struct clk_hw *out_dsiclk_hw;
/* clock-provider: */
struct clk_hw_onecell_data *hw_data;
struct pll_7nm_cached_state cached_state;
enum msm_dsi_phy_usecase uc;
struct dsi_pll_7nm *slave;
};
#define to_pll_7nm(x) container_of(x, struct dsi_pll_7nm, base)
/*
* Global list of private DSI PLL struct pointers. We need this for Dual DSI
* mode, where the master PLL's clk_ops needs to access the slave's private data
*/
static struct dsi_pll_7nm *pll_7nm_list[DSI_MAX];
static void dsi_pll_setup_config(struct dsi_pll_7nm *pll)
{
struct dsi_pll_config *config = &pll->pll_configuration;
config->ref_freq = pll->vco_ref_clk_rate;
config->output_div = 1;
config->dec_bits = 8;
config->frac_bits = 18;
config->lock_timer = 64;
config->ssc_freq = 31500;
config->ssc_offset = 4800;
config->ssc_adj_per = 2;
config->thresh_cycles = 32;
config->refclk_cycles = 256;
config->div_override = false;
config->ignore_frac = false;
config->disable_prescaler = false;
/* TODO: ssc enable */
config->enable_ssc = false;
config->ssc_center = 0;
}
static void dsi_pll_calc_dec_frac(struct dsi_pll_7nm *pll)
{
struct dsi_pll_config *config = &pll->pll_configuration;
struct dsi_pll_regs *regs = &pll->reg_setup;
u64 fref = pll->vco_ref_clk_rate;
u64 pll_freq;
u64 divider;
u64 dec, dec_multiple;
u32 frac;
u64 multiplier;
pll_freq = pll->vco_current_rate;
if (config->disable_prescaler)
divider = fref;
else
divider = fref * 2;
multiplier = 1 << config->frac_bits;
dec_multiple = div_u64(pll_freq * multiplier, divider);
div_u64_rem(dec_multiple, multiplier, &frac);
dec = div_u64(dec_multiple, multiplier);
if (pll->base.type != MSM_DSI_PHY_7NM_V4_1)
regs->pll_clock_inverters = 0x28;
else if (pll_freq <= 1000000000ULL)
regs->pll_clock_inverters = 0xa0;
else if (pll_freq <= 2500000000ULL)
regs->pll_clock_inverters = 0x20;
else if (pll_freq <= 3020000000ULL)
regs->pll_clock_inverters = 0x00;
else
regs->pll_clock_inverters = 0x40;
regs->pll_lockdet_rate = config->lock_timer;
regs->decimal_div_start = dec;
regs->frac_div_start_low = (frac & 0xff);
regs->frac_div_start_mid = (frac & 0xff00) >> 8;
regs->frac_div_start_high = (frac & 0x30000) >> 16;
}
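/*
* Worked example (hypothetical rate, assuming the default 19.2 MHz
* reference and frac_bits = 18): for a 1.5 GHz VCO with the prescaler
* enabled,
*   divider      = 2 * 19200000                  = 38400000
*   dec_multiple = 1500000000 * 2^18 / 38400000  = 10240000
*   dec          = 10240000 / 2^18               = 39
*   frac         = 10240000 % 2^18               = 16384
* so DECIMAL_DIV_START is programmed with 39 and the 18-bit fraction
* 16384 is split across the FRAC_DIV_START_LOW/MID/HIGH registers.
*/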
#define SSC_CENTER BIT(0)
#define SSC_EN BIT(1)
static void dsi_pll_calc_ssc(struct dsi_pll_7nm *pll)
{
struct dsi_pll_config *config = &pll->pll_configuration;
struct dsi_pll_regs *regs = &pll->reg_setup;
u32 ssc_per;
u32 ssc_mod;
u64 ssc_step_size;
u64 frac;
if (!config->enable_ssc) {
DBG("SSC not enabled\n");
return;
}
ssc_per = DIV_ROUND_CLOSEST(config->ref_freq, config->ssc_freq) / 2 - 1;
ssc_mod = (ssc_per + 1) % (config->ssc_adj_per + 1);
ssc_per -= ssc_mod;
frac = regs->frac_div_start_low |
(regs->frac_div_start_mid << 8) |
(regs->frac_div_start_high << 16);
ssc_step_size = regs->decimal_div_start;
ssc_step_size *= (1 << config->frac_bits);
ssc_step_size += frac;
ssc_step_size *= config->ssc_offset;
ssc_step_size *= (config->ssc_adj_per + 1);
ssc_step_size = div_u64(ssc_step_size, (ssc_per + 1));
ssc_step_size = DIV_ROUND_CLOSEST_ULL(ssc_step_size, 1000000);
regs->ssc_div_per_low = ssc_per & 0xFF;
regs->ssc_div_per_high = (ssc_per & 0xFF00) >> 8;
regs->ssc_stepsize_low = (u32)(ssc_step_size & 0xFF);
regs->ssc_stepsize_high = (u32)((ssc_step_size & 0xFF00) >> 8);
regs->ssc_adjper_low = config->ssc_adj_per & 0xFF;
regs->ssc_adjper_high = (config->ssc_adj_per & 0xFF00) >> 8;
regs->ssc_control = config->ssc_center ? SSC_CENTER : 0;
pr_debug("SCC: Dec:%d, frac:%llu, frac_bits:%d\n",
regs->decimal_div_start, frac, config->frac_bits);
pr_debug("SSC: div_per:0x%X, stepsize:0x%X, adjper:0x%X\n",
ssc_per, (u32)ssc_step_size, config->ssc_adj_per);
}
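Although enable_ssc is left false in dsi_pll_setup_config() for now, the arithmetic above is easy to check by hand with the example dec = 39, frac = 16384 values and the assumed 19.2 MHz reference: ssc_per = DIV_ROUND_CLOSEST(19200000, 31500) / 2 - 1 = 610 / 2 - 1 = 304, then trimmed by (304 + 1) % (2 + 1) = 2 down to 302 so that ssc_adj_per + 1 evenly divides ssc_per + 1. The step size works out to (39 * 2^18 + 16384) * 4800 * 3 / 303, scaled down by 10^6 and rounded to 487 = 0x1e7, so ssc_stepsize_low = 0xe7 and ssc_stepsize_high = 0x01.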
static void dsi_pll_ssc_commit(struct dsi_pll_7nm *pll)
{
void __iomem *base = pll->mmio;
struct dsi_pll_regs *regs = &pll->reg_setup;
if (pll->pll_configuration.enable_ssc) {
pr_debug("SSC is enabled\n");
pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW_1,
regs->ssc_stepsize_low);
pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH_1,
regs->ssc_stepsize_high);
pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW_1,
regs->ssc_div_per_low);
pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH_1,
regs->ssc_div_per_high);
pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW_1,
regs->ssc_adjper_low);
pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH_1,
regs->ssc_adjper_high);
pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_CONTROL,
SSC_EN | regs->ssc_control);
}
}
static void dsi_pll_config_hzindep_reg(struct dsi_pll_7nm *pll)
{
void __iomem *base = pll->mmio;
u8 analog_controls_five_1 = 0x01, vco_config_1 = 0x00;
if (pll->base.type == MSM_DSI_PHY_7NM_V4_1) {
if (pll->vco_current_rate >= 3100000000ULL)
analog_controls_five_1 = 0x03;
if (pll->vco_current_rate < 1520000000ULL)
vco_config_1 = 0x08;
else if (pll->vco_current_rate < 2990000000ULL)
vco_config_1 = 0x01;
}
pll_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE_1,
analog_controls_five_1);
pll_write(base + REG_DSI_7nm_PHY_PLL_VCO_CONFIG_1, vco_config_1);
pll_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE, 0x01);
pll_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_TWO, 0x03);
pll_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_THREE, 0x00);
pll_write(base + REG_DSI_7nm_PHY_PLL_DSM_DIVIDER, 0x00);
pll_write(base + REG_DSI_7nm_PHY_PLL_FEEDBACK_DIVIDER, 0x4e);
pll_write(base + REG_DSI_7nm_PHY_PLL_CALIBRATION_SETTINGS, 0x40);
pll_write(base + REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE, 0xba);
pll_write(base + REG_DSI_7nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE, 0x0c);
pll_write(base + REG_DSI_7nm_PHY_PLL_OUTDIV, 0x00);
pll_write(base + REG_DSI_7nm_PHY_PLL_CORE_OVERRIDE, 0x00);
pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO, 0x08);
pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_PROP_GAIN_RATE_1, 0x0a);
pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_BAND_SEL_RATE_1, 0xc0);
pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, 0x84);
pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, 0x82);
pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1, 0x4c);
pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_OVERRIDE, 0x80);
pll_write(base + REG_DSI_7nm_PHY_PLL_PFILT, 0x29);
pll_write(base + REG_DSI_7nm_PHY_PLL_PFILT, 0x2f);
pll_write(base + REG_DSI_7nm_PHY_PLL_IFILT, 0x2a);
pll_write(base + REG_DSI_7nm_PHY_PLL_IFILT,
pll->base.type == MSM_DSI_PHY_7NM_V4_1 ? 0x3f : 0x22);
if (pll->base.type == MSM_DSI_PHY_7NM_V4_1) {
pll_write(base + REG_DSI_7nm_PHY_PLL_PERF_OPTIMIZE, 0x22);
if (pll->slave)
pll_write(pll->slave->mmio + REG_DSI_7nm_PHY_PLL_PERF_OPTIMIZE, 0x22);
}
}
static void dsi_pll_commit(struct dsi_pll_7nm *pll)
{
void __iomem *base = pll->mmio;
struct dsi_pll_regs *reg = &pll->reg_setup;
pll_write(base + REG_DSI_7nm_PHY_PLL_CORE_INPUT_OVERRIDE, 0x12);
pll_write(base + REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1, reg->decimal_div_start);
pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1, reg->frac_div_start_low);
pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1, reg->frac_div_start_mid);
pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1, reg->frac_div_start_high);
pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1, 0x40);
pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);
pll_write(base + REG_DSI_7nm_PHY_PLL_CMODE_1, 0x10); /* TODO: 0x00 for CPHY */
pll_write(base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS, reg->pll_clock_inverters);
}
static int dsi_pll_7nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_7nm->id, rate,
parent_rate);
pll_7nm->vco_current_rate = rate;
pll_7nm->vco_ref_clk_rate = VCO_REF_CLK_RATE;
dsi_pll_setup_config(pll_7nm);
dsi_pll_calc_dec_frac(pll_7nm);
dsi_pll_calc_ssc(pll_7nm);
dsi_pll_commit(pll_7nm);
dsi_pll_config_hzindep_reg(pll_7nm);
dsi_pll_ssc_commit(pll_7nm);
/* flush, ensure all register writes are done */
wmb();
return 0;
}
static int dsi_pll_7nm_lock_status(struct dsi_pll_7nm *pll)
{
int rc;
u32 status = 0;
u32 const delay_us = 100;
u32 const timeout_us = 5000;
rc = readl_poll_timeout_atomic(pll->mmio +
REG_DSI_7nm_PHY_PLL_COMMON_STATUS_ONE,
status,
((status & BIT(0)) > 0),
delay_us,
timeout_us);
if (rc)
pr_err("DSI PLL(%d) lock failed, status=0x%08x\n",
pll->id, status);
return rc;
}
static void dsi_pll_disable_pll_bias(struct dsi_pll_7nm *pll)
{
u32 data = pll_read(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_0);
pll_write(pll->mmio + REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES, 0);
pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_0, data & ~BIT(5));
ndelay(250);
}
static void dsi_pll_enable_pll_bias(struct dsi_pll_7nm *pll)
{
u32 data = pll_read(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_0);
pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_0, data | BIT(5));
pll_write(pll->mmio + REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES, 0xc0);
ndelay(250);
}
static void dsi_pll_disable_global_clk(struct dsi_pll_7nm *pll)
{
u32 data;
data = pll_read(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CLK_CFG1, data & ~BIT(5));
}
static void dsi_pll_enable_global_clk(struct dsi_pll_7nm *pll)
{
u32 data;
pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_3, 0x04);
data = pll_read(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CLK_CFG1,
data | BIT(5) | BIT(4));
}
static void dsi_pll_phy_dig_reset(struct dsi_pll_7nm *pll)
{
/*
* Reset the PHY digital domain. This would be needed when
* coming out of a CX or analog rail power collapse while
* ensuring that the pads maintain LP00 or LP11 state
*/
pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE4, BIT(0));
wmb(); /* Ensure that the reset is asserted */
pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE4, 0x0);
wmb(); /* Ensure that the reset is deasserted */
}
static int dsi_pll_7nm_vco_prepare(struct clk_hw *hw)
{
struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
int rc;
dsi_pll_enable_pll_bias(pll_7nm);
if (pll_7nm->slave)
dsi_pll_enable_pll_bias(pll_7nm->slave);
/* Start PLL */
pll_write(pll_7nm->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_PLL_CNTRL, 0x01);
/*
* ensure all PLL configurations are written prior to checking
* for PLL lock.
*/
wmb();
/* Check for PLL lock */
rc = dsi_pll_7nm_lock_status(pll_7nm);
if (rc) {
pr_err("PLL(%d) lock failed\n", pll_7nm->id);
goto error;
}
pll->pll_on = true;
/*
* assert power on reset for PHY digital in case the PLL is
* enabled after CX or analog domain power collapse. This needs
* to be done before enabling the global clk.
*/
dsi_pll_phy_dig_reset(pll_7nm);
if (pll_7nm->slave)
dsi_pll_phy_dig_reset(pll_7nm->slave);
dsi_pll_enable_global_clk(pll_7nm);
if (pll_7nm->slave)
dsi_pll_enable_global_clk(pll_7nm->slave);
error:
return rc;
}
static void dsi_pll_disable_sub(struct dsi_pll_7nm *pll)
{
pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_RBUF_CTRL, 0);
dsi_pll_disable_pll_bias(pll);
}
static void dsi_pll_7nm_vco_unprepare(struct clk_hw *hw)
{
struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
/*
* To avoid any stray glitches while abruptly powering down the PLL,
* make sure to gate the clock using the clock enable bit before
* powering down the PLL
*/
dsi_pll_disable_global_clk(pll_7nm);
pll_write(pll_7nm->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_PLL_CNTRL, 0);
dsi_pll_disable_sub(pll_7nm);
if (pll_7nm->slave) {
dsi_pll_disable_global_clk(pll_7nm->slave);
dsi_pll_disable_sub(pll_7nm->slave);
}
/* flush, ensure all register writes are done */
wmb();
pll->pll_on = false;
}
static unsigned long dsi_pll_7nm_vco_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
void __iomem *base = pll_7nm->mmio;
u64 ref_clk = pll_7nm->vco_ref_clk_rate;
u64 vco_rate = 0x0;
u64 multiplier;
u32 frac;
u32 dec;
u64 pll_freq, tmp64;
dec = pll_read(base + REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1);
dec &= 0xff;
frac = pll_read(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1);
frac |= ((pll_read(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1) &
0xff) << 8);
frac |= ((pll_read(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1) &
0x3) << 16);
/*
* TODO:
* 1. Assumes prescaler is disabled
* 2. Multiplier is 2^18. It should be 2^(num_of_frac_bits)
*/
multiplier = 1 << 18;
pll_freq = dec * (ref_clk * 2);
tmp64 = (ref_clk * 2 * frac);
pll_freq += div_u64(tmp64, multiplier);
vco_rate = pll_freq;
DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x",
pll_7nm->id, (unsigned long)vco_rate, dec, frac);
return (unsigned long)vco_rate;
}
static const struct clk_ops clk_ops_dsi_pll_7nm_vco = {
.round_rate = msm_dsi_pll_helper_clk_round_rate,
.set_rate = dsi_pll_7nm_vco_set_rate,
.recalc_rate = dsi_pll_7nm_vco_recalc_rate,
.prepare = dsi_pll_7nm_vco_prepare,
.unprepare = dsi_pll_7nm_vco_unprepare,
};
/*
* PLL Callbacks
*/
static void dsi_pll_7nm_save_state(struct msm_dsi_pll *pll)
{
struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
struct pll_7nm_cached_state *cached = &pll_7nm->cached_state;
void __iomem *phy_base = pll_7nm->phy_cmn_mmio;
u32 cmn_clk_cfg0, cmn_clk_cfg1;
cached->pll_out_div = pll_read(pll_7nm->mmio +
REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE);
cached->pll_out_div &= 0x3;
cmn_clk_cfg0 = pll_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0);
cached->bit_clk_div = cmn_clk_cfg0 & 0xf;
cached->pix_clk_div = (cmn_clk_cfg0 & 0xf0) >> 4;
cmn_clk_cfg1 = pll_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
cached->pll_mux = cmn_clk_cfg1 & 0x3;
DBG("DSI PLL%d outdiv %x bit_clk_div %x pix_clk_div %x pll_mux %x",
pll_7nm->id, cached->pll_out_div, cached->bit_clk_div,
cached->pix_clk_div, cached->pll_mux);
}
static int dsi_pll_7nm_restore_state(struct msm_dsi_pll *pll)
{
struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
struct pll_7nm_cached_state *cached = &pll_7nm->cached_state;
void __iomem *phy_base = pll_7nm->phy_cmn_mmio;
u32 val;
val = pll_read(pll_7nm->mmio + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE);
val &= ~0x3;
val |= cached->pll_out_div;
pll_write(pll_7nm->mmio + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE, val);
pll_write(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0,
cached->bit_clk_div | (cached->pix_clk_div << 4));
val = pll_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
val &= ~0x3;
val |= cached->pll_mux;
pll_write(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, val);
DBG("DSI PLL%d", pll_7nm->id);
return 0;
}
static int dsi_pll_7nm_set_usecase(struct msm_dsi_pll *pll,
enum msm_dsi_phy_usecase uc)
{
struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
void __iomem *base = pll_7nm->phy_cmn_mmio;
u32 data = 0x0; /* internal PLL */
DBG("DSI PLL%d", pll_7nm->id);
switch (uc) {
case MSM_DSI_PHY_STANDALONE:
break;
case MSM_DSI_PHY_MASTER:
pll_7nm->slave = pll_7nm_list[(pll_7nm->id + 1) % DSI_MAX];
break;
case MSM_DSI_PHY_SLAVE:
data = 0x1; /* external PLL */
break;
default:
return -EINVAL;
}
/* set PLL src */
pll_write(base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, (data << 2));
pll_7nm->uc = uc;
return 0;
}
static int dsi_pll_7nm_get_provider(struct msm_dsi_pll *pll,
struct clk **byte_clk_provider,
struct clk **pixel_clk_provider)
{
struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
struct clk_hw_onecell_data *hw_data = pll_7nm->hw_data;
DBG("DSI PLL%d", pll_7nm->id);
if (byte_clk_provider)
*byte_clk_provider = hw_data->hws[DSI_BYTE_PLL_CLK]->clk;
if (pixel_clk_provider)
*pixel_clk_provider = hw_data->hws[DSI_PIXEL_PLL_CLK]->clk;
return 0;
}
static void dsi_pll_7nm_destroy(struct msm_dsi_pll *pll)
{
struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
struct device *dev = &pll_7nm->pdev->dev;
DBG("DSI PLL%d", pll_7nm->id);
of_clk_del_provider(dev->of_node);
clk_hw_unregister_divider(pll_7nm->out_dsiclk_hw);
clk_hw_unregister_mux(pll_7nm->pclk_mux_hw);
clk_hw_unregister_fixed_factor(pll_7nm->post_out_div_clk_hw);
clk_hw_unregister_fixed_factor(pll_7nm->by_2_bit_clk_hw);
clk_hw_unregister_fixed_factor(pll_7nm->byte_clk_hw);
clk_hw_unregister_divider(pll_7nm->bit_clk_hw);
clk_hw_unregister_divider(pll_7nm->out_div_clk_hw);
clk_hw_unregister(&pll_7nm->base.clk_hw);
}
/*
* The post dividers and mux clocks are created using the standard divider and
* mux API. Unlike the 14nm PHY, the slave PLL doesn't need its dividers/mux
* state to follow the master PLL's divider/mux state. Therefore, we don't
* require special clock ops that also configure the slave PLL registers
*/
static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm)
{
char clk_name[32], parent[32], vco_name[32];
char parent2[32], parent3[32], parent4[32];
struct clk_init_data vco_init = {
.parent_names = (const char *[]){ "bi_tcxo" },
.num_parents = 1,
.name = vco_name,
.flags = CLK_IGNORE_UNUSED,
.ops = &clk_ops_dsi_pll_7nm_vco,
};
struct device *dev = &pll_7nm->pdev->dev;
struct clk_hw_onecell_data *hw_data;
struct clk_hw *hw;
int ret;
DBG("DSI%d", pll_7nm->id);
hw_data = devm_kzalloc(dev, sizeof(*hw_data) +
NUM_PROVIDED_CLKS * sizeof(struct clk_hw *),
GFP_KERNEL);
if (!hw_data)
return -ENOMEM;
snprintf(vco_name, 32, "dsi%dvco_clk", pll_7nm->id);
pll_7nm->base.clk_hw.init = &vco_init;
ret = clk_hw_register(dev, &pll_7nm->base.clk_hw);
if (ret)
return ret;
snprintf(clk_name, 32, "dsi%d_pll_out_div_clk", pll_7nm->id);
snprintf(parent, 32, "dsi%dvco_clk", pll_7nm->id);
hw = clk_hw_register_divider(dev, clk_name,
parent, CLK_SET_RATE_PARENT,
pll_7nm->mmio +
REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE,
0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto err_base_clk_hw;
}
pll_7nm->out_div_clk_hw = hw;
snprintf(clk_name, 32, "dsi%d_pll_bit_clk", pll_7nm->id);
snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_7nm->id);
/* BIT CLK: DIV_CTRL_3_0 */
hw = clk_hw_register_divider(dev, clk_name, parent,
CLK_SET_RATE_PARENT,
pll_7nm->phy_cmn_mmio +
REG_DSI_7nm_PHY_CMN_CLK_CFG0,
0, 4, CLK_DIVIDER_ONE_BASED,
&pll_7nm->postdiv_lock);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto err_out_div_clk_hw;
}
pll_7nm->bit_clk_hw = hw;
snprintf(clk_name, 32, "dsi%d_phy_pll_out_byteclk", pll_7nm->id);
snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->id);
/* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */
hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
CLK_SET_RATE_PARENT, 1, 8);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto err_bit_clk_hw;
}
pll_7nm->byte_clk_hw = hw;
hw_data->hws[DSI_BYTE_PLL_CLK] = hw;
snprintf(clk_name, 32, "dsi%d_pll_by_2_bit_clk", pll_7nm->id);
snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->id);
hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
0, 1, 2);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto err_byte_clk_hw;
}
pll_7nm->by_2_bit_clk_hw = hw;
snprintf(clk_name, 32, "dsi%d_pll_post_out_div_clk", pll_7nm->id);
snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_7nm->id);
hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
0, 1, 4);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto err_by_2_bit_clk_hw;
}
pll_7nm->post_out_div_clk_hw = hw;
snprintf(clk_name, 32, "dsi%d_pclk_mux", pll_7nm->id);
snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->id);
snprintf(parent2, 32, "dsi%d_pll_by_2_bit_clk", pll_7nm->id);
snprintf(parent3, 32, "dsi%d_pll_out_div_clk", pll_7nm->id);
snprintf(parent4, 32, "dsi%d_pll_post_out_div_clk", pll_7nm->id);
hw = clk_hw_register_mux(dev, clk_name,
((const char *[]){
parent, parent2, parent3, parent4
}), 4, 0, pll_7nm->phy_cmn_mmio +
REG_DSI_7nm_PHY_CMN_CLK_CFG1,
0, 2, 0, NULL);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto err_post_out_div_clk_hw;
}
pll_7nm->pclk_mux_hw = hw;
snprintf(clk_name, 32, "dsi%d_phy_pll_out_dsiclk", pll_7nm->id);
snprintf(parent, 32, "dsi%d_pclk_mux", pll_7nm->id);
/* PIX CLK DIV : DIV_CTRL_7_4*/
hw = clk_hw_register_divider(dev, clk_name, parent,
0, pll_7nm->phy_cmn_mmio +
REG_DSI_7nm_PHY_CMN_CLK_CFG0,
4, 4, CLK_DIVIDER_ONE_BASED,
&pll_7nm->postdiv_lock);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto err_pclk_mux_hw;
}
pll_7nm->out_dsiclk_hw = hw;
hw_data->hws[DSI_PIXEL_PLL_CLK] = hw;
hw_data->num = NUM_PROVIDED_CLKS;
pll_7nm->hw_data = hw_data;
ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
pll_7nm->hw_data);
if (ret) {
DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
goto err_dsiclk_hw;
}
return 0;
err_dsiclk_hw:
clk_hw_unregister_divider(pll_7nm->out_dsiclk_hw);
err_pclk_mux_hw:
clk_hw_unregister_mux(pll_7nm->pclk_mux_hw);
err_post_out_div_clk_hw:
clk_hw_unregister_fixed_factor(pll_7nm->post_out_div_clk_hw);
err_by_2_bit_clk_hw:
clk_hw_unregister_fixed_factor(pll_7nm->by_2_bit_clk_hw);
err_byte_clk_hw:
clk_hw_unregister_fixed_factor(pll_7nm->byte_clk_hw);
err_bit_clk_hw:
clk_hw_unregister_divider(pll_7nm->bit_clk_hw);
err_out_div_clk_hw:
clk_hw_unregister_divider(pll_7nm->out_div_clk_hw);
err_base_clk_hw:
clk_hw_unregister(&pll_7nm->base.clk_hw);
return ret;
}
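For orientation, the registrations above produce the following clock tree (a sketch for id 0; the out-div and CFG0 dividers are programmed at runtime, so ranges are shown rather than fixed ratios):
dsi0vco_clk (the PLL VCO itself)
  dsi0_pll_out_div_clk (/1, /2, /4 or /8 via PLL_OUTDIV_RATE)
    dsi0_pll_bit_clk (/1../15 via CLK_CFG0[3:0])
      dsi0_phy_pll_out_byteclk (fixed /8, provided as DSI_BYTE_PLL_CLK)
      dsi0_pll_by_2_bit_clk (fixed /2)
    dsi0_pll_post_out_div_clk (fixed /4)
dsi0_pclk_mux (selects among bit, by_2_bit, out_div and post_out_div via CLK_CFG1[1:0])
  dsi0_phy_pll_out_dsiclk (/1../15 via CLK_CFG0[7:4], provided as DSI_PIXEL_PLL_CLK)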
struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev, int id)
{
struct dsi_pll_7nm *pll_7nm;
struct msm_dsi_pll *pll;
int ret;
pll_7nm = devm_kzalloc(&pdev->dev, sizeof(*pll_7nm), GFP_KERNEL);
if (!pll_7nm)
return ERR_PTR(-ENOMEM);
DBG("DSI PLL%d", id);
pll_7nm->pdev = pdev;
pll_7nm->id = id;
pll_7nm_list[id] = pll_7nm;
pll_7nm->phy_cmn_mmio = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
if (IS_ERR_OR_NULL(pll_7nm->phy_cmn_mmio)) {
DRM_DEV_ERROR(&pdev->dev, "failed to map CMN PHY base\n");
return ERR_PTR(-ENOMEM);
}
pll_7nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
if (IS_ERR_OR_NULL(pll_7nm->mmio)) {
DRM_DEV_ERROR(&pdev->dev, "failed to map PLL base\n");
return ERR_PTR(-ENOMEM);
}
spin_lock_init(&pll_7nm->postdiv_lock);
pll = &pll_7nm->base;
pll->min_rate = 1000000000UL;
pll->max_rate = 3500000000UL;
if (pll->type == MSM_DSI_PHY_7NM_V4_1) {
pll->min_rate = 600000000UL;
pll->max_rate = 5000000000UL;
/* workaround for max rate overflowing on 32-bit builds: */
pll->max_rate = max(pll->max_rate, 0xffffffffUL);
}
pll->get_provider = dsi_pll_7nm_get_provider;
pll->destroy = dsi_pll_7nm_destroy;
pll->save_state = dsi_pll_7nm_save_state;
pll->restore_state = dsi_pll_7nm_restore_state;
pll->set_usecase = dsi_pll_7nm_set_usecase;
pll_7nm->vco_delay = 1;
ret = pll_7nm_register(pll_7nm);
if (ret) {
DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
return ERR_PTR(ret);
}
/* TODO: Remove this when we have proper display handover support */
msm_dsi_pll_save_state(pll);
return pll;
}


@ -453,15 +453,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
if (ret)
goto err_msm_uninit;
if (!dev->dma_parms) {
dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
GFP_KERNEL);
if (!dev->dma_parms) {
ret = -ENOMEM;
goto err_msm_uninit;
}
}
dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
dma_set_max_seg_size(dev, UINT_MAX);
msm_gem_shrinker_init(ddev);
@ -594,9 +586,10 @@ static int context_init(struct drm_device *dev, struct drm_file *file)
if (!ctx)
return -ENOMEM;
kref_init(&ctx->ref);
msm_submitqueue_init(dev, ctx);
ctx->aspace = priv->gpu ? priv->gpu->aspace : NULL;
ctx->aspace = msm_gpu_create_private_address_space(priv->gpu, current);
file->driver_priv = ctx;
return 0;
@ -615,7 +608,7 @@ static int msm_open(struct drm_device *dev, struct drm_file *file)
static void context_close(struct msm_file_private *ctx)
{
msm_submitqueue_close(ctx);
kfree(ctx);
msm_file_private_put(ctx);
}
static void msm_postclose(struct drm_device *dev, struct drm_file *file)
@ -779,18 +772,19 @@ static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
}
static int msm_ioctl_gem_info_iova(struct drm_device *dev,
struct drm_gem_object *obj, uint64_t *iova)
struct drm_file *file, struct drm_gem_object *obj,
uint64_t *iova)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_file_private *ctx = file->driver_priv;
if (!priv->gpu)
if (!ctx->aspace)
return -EINVAL;
/*
* Don't pin the memory here - just get an address so that userspace can
* be productive
*/
return msm_gem_get_iova(obj, priv->gpu->aspace, iova);
return msm_gem_get_iova(obj, ctx->aspace, iova);
}
static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
@ -829,7 +823,7 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
args->value = msm_gem_mmap_offset(obj);
break;
case MSM_INFO_GET_IOVA:
ret = msm_ioctl_gem_info_iova(dev, obj, &args->value);
ret = msm_ioctl_gem_info_iova(dev, file, obj, &args->value);
break;
case MSM_INFO_SET_NAME:
/* length check should leave room for terminating null: */
@ -1358,6 +1352,7 @@ static int __init msm_drm_register(void)
msm_dsi_register();
msm_edp_register();
msm_hdmi_register();
msm_dp_register();
adreno_register();
return platform_driver_register(&msm_platform_driver);
}
@ -1366,6 +1361,7 @@ static void __exit msm_drm_unregister(void)
{
DBG("fini");
platform_driver_unregister(&msm_platform_driver);
msm_dp_unregister();
msm_hdmi_unregister();
adreno_unregister();
msm_edp_unregister();


@ -57,6 +57,7 @@ struct msm_file_private {
struct list_head submitqueues;
int queueid;
struct msm_gem_address_space *aspace;
struct kref ref;
};
enum msm_mdp_plane_property {
@ -159,6 +160,8 @@ struct msm_drm_private {
/* DSI is shared by mdp4 and mdp5 */
struct msm_dsi *dsi[2];
struct msm_dp *dp;
/* when we have more than one 'msm_gpu' these need to be an array: */
struct msm_gpu *gpu;
struct msm_file_private *lastctx;
@ -248,6 +251,10 @@ int msm_gem_map_vma(struct msm_gem_address_space *aspace,
void msm_gem_close_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma);
struct msm_gem_address_space *
msm_gem_address_space_get(struct msm_gem_address_space *aspace);
void msm_gem_address_space_put(struct msm_gem_address_space *aspace);
struct msm_gem_address_space *
@ -302,9 +309,8 @@ void msm_gem_put_vaddr(struct drm_gem_object *obj);
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv);
int msm_gem_sync_object(struct drm_gem_object *obj,
struct msm_fence_context *fctx, bool exclusive);
void msm_gem_move_to_active(struct drm_gem_object *obj,
struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence);
void msm_gem_move_to_inactive(struct drm_gem_object *obj);
void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu);
void msm_gem_active_put(struct drm_gem_object *obj);
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
int msm_gem_cpu_fini(struct drm_gem_object *obj);
void msm_gem_free_object(struct drm_gem_object *obj);
@ -378,6 +384,63 @@ static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi,
}
#endif
#ifdef CONFIG_DRM_MSM_DP
int __init msm_dp_register(void);
void __exit msm_dp_unregister(void);
int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev,
struct drm_encoder *encoder);
int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder);
int msm_dp_display_disable(struct msm_dp *dp, struct drm_encoder *encoder);
int msm_dp_display_pre_disable(struct msm_dp *dp, struct drm_encoder *encoder);
void msm_dp_display_mode_set(struct msm_dp *dp, struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
void msm_dp_irq_postinstall(struct msm_dp *dp_display);
void msm_dp_debugfs_init(struct msm_dp *dp_display, struct drm_minor *minor);
#else
static inline int __init msm_dp_register(void)
{
return -EINVAL;
}
static inline void __exit msm_dp_unregister(void)
{
}
static inline int msm_dp_modeset_init(struct msm_dp *dp_display,
struct drm_device *dev,
struct drm_encoder *encoder)
{
return -EINVAL;
}
static inline int msm_dp_display_enable(struct msm_dp *dp,
struct drm_encoder *encoder)
{
return -EINVAL;
}
static inline int msm_dp_display_disable(struct msm_dp *dp,
struct drm_encoder *encoder)
{
return -EINVAL;
}
static inline void msm_dp_display_mode_set(struct msm_dp *dp,
struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
}
static inline void msm_dp_irq_postinstall(struct msm_dp *dp_display)
{
}
static inline void msm_dp_debugfs_init(struct msm_dp *dp_display,
struct drm_minor *minor)
{
}
#endif
void __init msm_mdp_register(void);
void __exit msm_mdp_unregister(void);
void __init msm_dpu_register(void);
@ -398,8 +461,9 @@ void msm_perf_debugfs_cleanup(struct msm_drm_private *priv);
#else
static inline int msm_debugfs_late_init(struct drm_device *dev) { return 0; }
__printf(3, 4)
static inline void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
const char *fmt, ...) {}
static inline void msm_rd_dump_submit(struct msm_rd_state *rd,
struct msm_gem_submit *submit,
const char *fmt, ...) {}
static inline void msm_rd_debugfs_cleanup(struct msm_drm_private *priv) {}
static inline void msm_perf_debugfs_cleanup(struct msm_drm_private *priv) {}
#endif
@ -419,7 +483,8 @@ struct msm_gpu_submitqueue;
int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx);
struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
u32 id);
int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
int msm_submitqueue_create(struct drm_device *drm,
struct msm_file_private *ctx,
u32 prio, u32 flags, u32 *id);
int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
struct drm_msm_submitqueue_query *args);
@ -428,6 +493,26 @@ void msm_submitqueue_close(struct msm_file_private *ctx);
void msm_submitqueue_destroy(struct kref *kref);
static inline void __msm_file_private_destroy(struct kref *kref)
{
struct msm_file_private *ctx = container_of(kref,
struct msm_file_private, ref);
msm_gem_address_space_put(ctx->aspace);
kfree(ctx);
}
static inline void msm_file_private_put(struct msm_file_private *ctx)
{
kref_put(&ctx->ref, __msm_file_private_destroy);
}
static inline struct msm_file_private *msm_file_private_get(
struct msm_file_private *ctx)
{
kref_get(&ctx->ref);
return ctx;
}
#define DBG(fmt, ...) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
#define VERB(fmt, ...) if (0) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)


@ -52,23 +52,14 @@ static void sync_for_device(struct msm_gem_object *msm_obj)
{
struct device *dev = msm_obj->base.dev->dev;
if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
dma_sync_sgtable_for_device(dev, msm_obj->sgt,
DMA_BIDIRECTIONAL);
} else {
dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}
dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}
static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
struct device *dev = msm_obj->base.dev->dev;
if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
dma_sync_sgtable_for_cpu(dev, msm_obj->sgt, DMA_BIDIRECTIONAL);
} else {
dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}
dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}
/* allocate pages from VRAM carveout, used when no IOMMU: */
@ -750,31 +741,31 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
return 0;
}
void msm_gem_move_to_active(struct drm_gem_object *obj,
struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
msm_obj->gpu = gpu;
if (exclusive)
dma_resv_add_excl_fence(obj->resv, fence);
else
dma_resv_add_shared_fence(obj->resv, fence);
list_del_init(&msm_obj->mm_list);
list_add_tail(&msm_obj->mm_list, &gpu->active_list);
if (!atomic_fetch_inc(&msm_obj->active_count)) {
msm_obj->gpu = gpu;
list_del_init(&msm_obj->mm_list);
list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}
}
void msm_gem_move_to_inactive(struct drm_gem_object *obj)
void msm_gem_active_put(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_drm_private *priv = obj->dev->dev_private;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
msm_obj->gpu = NULL;
list_del_init(&msm_obj->mm_list);
list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
if (!atomic_dec_return(&msm_obj->active_count)) {
msm_obj->gpu = NULL;
list_del_init(&msm_obj->mm_list);
list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}
}
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
@ -849,11 +840,28 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
seq_puts(m, " vmas:");
list_for_each_entry(vma, &msm_obj->vmas, list)
seq_printf(m, " [%s: %08llx,%s,inuse=%d]",
vma->aspace != NULL ? vma->aspace->name : NULL,
vma->iova, vma->mapped ? "mapped" : "unmapped",
list_for_each_entry(vma, &msm_obj->vmas, list) {
const char *name, *comm;
if (vma->aspace) {
struct msm_gem_address_space *aspace = vma->aspace;
struct task_struct *task =
get_pid_task(aspace->pid, PIDTYPE_PID);
if (task) {
comm = kstrdup(task->comm, GFP_KERNEL);
} else {
comm = NULL;
}
name = aspace->name;
} else {
name = comm = NULL;
}
seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
name, comm ? ":" : "", comm ? comm : "",
vma->aspace, vma->iova,
vma->mapped ? "mapped" : "unmapped",
vma->inuse);
kfree(comm);
}
seq_puts(m, "\n");
}


@ -24,6 +24,11 @@ struct msm_gem_address_space {
spinlock_t lock; /* Protects drm_mm node allocation/removal */
struct msm_mmu *mmu;
struct kref kref;
/* For address spaces associated with a specific process, this
* will be non-NULL:
*/
struct pid *pid;
};
struct msm_gem_vma {
@ -83,12 +88,14 @@ struct msm_gem_object {
struct mutex lock; /* Protects resources associated with bo */
char name[32]; /* Identifier to print for the debugfs files */
atomic_t active_count;
};
#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
static inline bool is_active(struct msm_gem_object *msm_obj)
{
return msm_obj->gpu != NULL;
return atomic_read(&msm_obj->active_count);
}
static inline bool is_purgeable(struct msm_gem_object *msm_obj)
@ -142,6 +149,7 @@ struct msm_gem_submit {
bool valid; /* true if no cmdstream patching needed */
bool in_rb; /* "sudo" mode, copy cmds into RB */
struct msm_ringbuffer *ring;
struct msm_file_private *ctx;
unsigned int nr_cmds;
unsigned int nr_bos;
u32 ident; /* A "identifier" for the submit for logging */


@ -6,6 +6,7 @@
#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_gpu_trace.h"
static bool msm_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
{
@ -87,7 +88,7 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
mutex_unlock(&dev->struct_mutex);
if (freed > 0)
pr_info_ratelimited("Purging %lu bytes\n", freed << PAGE_SHIFT);
trace_msm_gem_purge(freed << PAGE_SHIFT);
return freed;
}
@ -123,7 +124,7 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
*(unsigned long *)ptr += unmapped;
if (unmapped > 0)
pr_info_ratelimited("Purging %u vmaps\n", unmapped);
trace_msm_gem_purge_vmaps(unmapped);
return NOTIFY_DONE;
}


@ -27,7 +27,7 @@
#define BO_PINNED 0x2000
static struct msm_gem_submit *submit_create(struct drm_device *dev,
struct msm_gpu *gpu, struct msm_gem_address_space *aspace,
struct msm_gpu *gpu,
struct msm_gpu_submitqueue *queue, uint32_t nr_bos,
uint32_t nr_cmds)
{
@ -43,7 +43,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
return NULL;
submit->dev = dev;
submit->aspace = aspace;
submit->aspace = queue->ctx->aspace;
submit->gpu = gpu;
submit->fence = NULL;
submit->cmd = (void *)&submit->bos[nr_bos];
@ -677,7 +677,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
}
}
submit = submit_create(dev, gpu, ctx->aspace, queue, args->nr_bos,
submit = submit_create(dev, gpu, queue, args->nr_bos,
args->nr_cmds);
if (!submit) {
ret = -ENOMEM;
@ -785,7 +785,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
}
}
msm_gpu_submit(gpu, submit, ctx);
msm_gpu_submit(gpu, submit);
args->fence = submit->fence->seqno;


@ -17,6 +17,7 @@ msm_gem_address_space_destroy(struct kref *kref)
drm_mm_takedown(&aspace->mm);
if (aspace->mmu)
aspace->mmu->funcs->destroy(aspace->mmu);
put_pid(aspace->pid);
kfree(aspace);
}
@ -27,6 +28,15 @@ void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
kref_put(&aspace->kref, msm_gem_address_space_destroy);
}
struct msm_gem_address_space *
msm_gem_address_space_get(struct msm_gem_address_space *aspace)
{
if (!IS_ERR_OR_NULL(aspace))
kref_get(&aspace->kref);
return aspace;
}
/* Actually unmap memory for the vma */
void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma)
@ -78,8 +88,10 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,
ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
size, prot);
if (ret)
if (ret) {
vma->mapped = false;
vma->inuse--;
}
return ret;
}


@ -24,7 +24,7 @@
static int msm_devfreq_target(struct device *dev, unsigned long *freq,
u32 flags)
{
struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
struct msm_gpu *gpu = dev_to_gpu(dev);
struct dev_pm_opp *opp;
opp = devfreq_recommended_opp(dev, freq, flags);
@ -32,6 +32,8 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq,
if (IS_ERR(opp))
return PTR_ERR(opp);
trace_msm_gpu_freq_change(dev_pm_opp_get_freq(opp));
if (gpu->funcs->gpu_set_freq)
gpu->funcs->gpu_set_freq(gpu, opp);
else
@ -45,7 +47,7 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq,
static int msm_devfreq_get_dev_status(struct device *dev,
struct devfreq_dev_status *status)
{
struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
struct msm_gpu *gpu = dev_to_gpu(dev);
ktime_t time;
if (gpu->funcs->gpu_get_freq)
@ -64,7 +66,7 @@ static int msm_devfreq_get_dev_status(struct device *dev,
static int msm_devfreq_get_cur_freq(struct device *dev, unsigned long *freq)
{
struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
struct msm_gpu *gpu = dev_to_gpu(dev);
if (gpu->funcs->gpu_get_freq)
*freq = gpu->funcs->gpu_get_freq(gpu);
@ -200,6 +202,7 @@ int msm_gpu_pm_resume(struct msm_gpu *gpu)
int ret;
DBG("%s", gpu->name);
trace_msm_gpu_resume(0);
ret = enable_pwrrail(gpu);
if (ret)
@ -225,6 +228,7 @@ int msm_gpu_pm_suspend(struct msm_gpu *gpu)
int ret;
DBG("%s", gpu->name);
trace_msm_gpu_suspend(0);
devfreq_suspend_device(gpu->devfreq.devfreq);
@ -520,7 +524,7 @@ static void recover_worker(struct work_struct *work)
struct msm_ringbuffer *ring = gpu->rb[i];
list_for_each_entry(submit, &ring->submits, node)
gpu->funcs->submit(gpu, submit, NULL);
gpu->funcs->submit(gpu, submit);
}
}
@ -694,8 +698,8 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
for (i = 0; i < submit->nr_bos; i++) {
struct msm_gem_object *msm_obj = submit->bos[i].obj;
/* move to inactive: */
msm_gem_move_to_inactive(&msm_obj->base);
msm_gem_active_put(&msm_obj->base);
msm_gem_unpin_iova(&msm_obj->base, submit->aspace);
drm_gem_object_put_locked(&msm_obj->base);
}
@ -747,8 +751,7 @@ void msm_gpu_retire(struct msm_gpu *gpu)
}
/* add bo's to gpu's ring, and kick gpu: */
void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_file_private *ctx)
void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
struct drm_device *dev = gpu->dev;
struct msm_drm_private *priv = dev->dev_private;
@ -771,6 +774,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
for (i = 0; i < submit->nr_bos; i++) {
struct msm_gem_object *msm_obj = submit->bos[i].obj;
struct drm_gem_object *drm_obj = &msm_obj->base;
uint64_t iova;
/* can't happen yet.. but when we add 2d support we'll have
@ -783,13 +787,15 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
msm_gem_get_and_pin_iova(&msm_obj->base, submit->aspace, &iova);
if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
dma_resv_add_excl_fence(drm_obj->resv, submit->fence);
else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence);
dma_resv_add_shared_fence(drm_obj->resv, submit->fence);
msm_gem_active_get(drm_obj, gpu);
}
gpu->funcs->submit(gpu, submit, ctx);
priv->lastctx = ctx;
gpu->funcs->submit(gpu, submit);
priv->lastctx = submit->queue->ctx;
hangcheck_timer_reset(gpu);
}
@ -824,6 +830,30 @@ static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
return 0;
}
/* Return a new address space for a msm_drm_private instance */
struct msm_gem_address_space *
msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task)
{
struct msm_gem_address_space *aspace = NULL;
if (!gpu)
return NULL;
/*
* If the target doesn't support private address spaces then return
* the global one
*/
if (gpu->funcs->create_private_address_space) {
aspace = gpu->funcs->create_private_address_space(gpu);
if (!IS_ERR(aspace))
aspace->pid = get_pid(task_pid(task));
}
if (IS_ERR_OR_NULL(aspace))
aspace = msm_gem_address_space_get(gpu->aspace);
return aspace;
}
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
const char *name, struct msm_gpu_config *config)
@ -892,7 +922,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
gpu->gpu_cx = NULL;
gpu->pdev = pdev;
platform_set_drvdata(pdev, gpu);
platform_set_drvdata(pdev, &gpu->adreno_smmu);
msm_devfreq_init(gpu);


@ -7,6 +7,7 @@
#ifndef __MSM_GPU_H__
#define __MSM_GPU_H__
#include <linux/adreno-smmu-priv.h>
#include <linux/clk.h>
#include <linux/interconnect.h>
#include <linux/pm_opp.h>
@ -45,8 +46,7 @@ struct msm_gpu_funcs {
int (*hw_init)(struct msm_gpu *gpu);
int (*pm_suspend)(struct msm_gpu *gpu);
int (*pm_resume)(struct msm_gpu *gpu);
void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_file_private *ctx);
void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit);
void (*flush)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
irqreturn_t (*irq)(struct msm_gpu *irq);
struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
@ -66,6 +66,9 @@ struct msm_gpu_funcs {
void (*gpu_set_freq)(struct msm_gpu *gpu, struct dev_pm_opp *opp);
struct msm_gem_address_space *(*create_address_space)
(struct msm_gpu *gpu, struct platform_device *pdev);
struct msm_gem_address_space *(*create_private_address_space)
(struct msm_gpu *gpu);
uint32_t (*get_rptr)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
};
struct msm_gpu {
@ -74,6 +77,8 @@ struct msm_gpu {
struct platform_device *pdev;
const struct msm_gpu_funcs *funcs;
struct adreno_smmu_priv adreno_smmu;
/* performance counters (hw & sw): */
spinlock_t perf_lock;
bool perfcntr_active;
@ -144,6 +149,12 @@ struct msm_gpu {
bool hw_apriv;
};
static inline struct msm_gpu *dev_to_gpu(struct device *dev)
{
struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(dev);
return container_of(adreno_smmu, struct msm_gpu, adreno_smmu);
}
/* It turns out that all targets use the same ringbuffer size */
#define MSM_GPU_RINGBUFFER_SZ SZ_32K
#define MSM_GPU_RINGBUFFER_BLKSIZE 32
@ -184,6 +195,7 @@ struct msm_gpu_submitqueue {
u32 flags;
u32 prio;
int faults;
struct msm_file_private *ctx;
struct list_head node;
struct kref ref;
};
@ -283,13 +295,15 @@ int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs);
void msm_gpu_retire(struct msm_gpu *gpu);
void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_file_private *ctx);
void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit);
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
const char *name, struct msm_gpu_config *config);
struct msm_gem_address_space *
msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task);
void msm_gpu_cleanup(struct msm_gpu *gpu);
struct msm_gpu *adreno_load_gpu(struct drm_device *dev);


@ -83,6 +83,89 @@ TRACE_EVENT(msm_gpu_submit_retired,
__entry->start_ticks, __entry->end_ticks)
);
TRACE_EVENT(msm_gpu_freq_change,
TP_PROTO(u32 freq),
TP_ARGS(freq),
TP_STRUCT__entry(
__field(u32, freq)
),
TP_fast_assign(
/* trace freq in MHz to match intel_gpu_freq_change, to make life easier
* for userspace
*/
__entry->freq = DIV_ROUND_UP(freq, 1000000);
),
TP_printk("new_freq=%u", __entry->freq)
);
TRACE_EVENT(msm_gmu_freq_change,
TP_PROTO(u32 freq, u32 perf_index),
TP_ARGS(freq, perf_index),
TP_STRUCT__entry(
__field(u32, freq)
__field(u32, perf_index)
),
TP_fast_assign(
__entry->freq = freq;
__entry->perf_index = perf_index;
),
TP_printk("freq=%u, perf_index=%u", __entry->freq, __entry->perf_index)
);
TRACE_EVENT(msm_gem_purge,
TP_PROTO(u32 bytes),
TP_ARGS(bytes),
TP_STRUCT__entry(
__field(u32, bytes)
),
TP_fast_assign(
__entry->bytes = bytes;
),
TP_printk("Purging %u bytes", __entry->bytes)
);
TRACE_EVENT(msm_gem_purge_vmaps,
TP_PROTO(u32 unmapped),
TP_ARGS(unmapped),
TP_STRUCT__entry(
__field(u32, unmapped)
),
TP_fast_assign(
__entry->unmapped = unmapped;
),
TP_printk("Purging %u vmaps", __entry->unmapped)
);
TRACE_EVENT(msm_gpu_suspend,
TP_PROTO(int dummy),
TP_ARGS(dummy),
TP_STRUCT__entry(
__field(u32, dummy)
),
TP_fast_assign(
__entry->dummy = dummy;
),
TP_printk("%u", __entry->dummy)
);
TRACE_EVENT(msm_gpu_resume,
TP_PROTO(int dummy),
TP_ARGS(dummy),
TP_STRUCT__entry(
__field(u32, dummy)
),
TP_fast_assign(
__entry->dummy = dummy;
),
TP_printk("%u", __entry->dummy)
);
#endif
#undef TRACE_INCLUDE_PATH


@ -101,7 +101,7 @@ struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu)
}
gpummu->gpu = gpu;
msm_mmu_init(&gpummu->base, dev, &funcs);
msm_mmu_init(&gpummu->base, dev, &funcs, MSM_MMU_GPUMMU);
return &gpummu->base;
}


@ -4,15 +4,210 @@
* Author: Rob Clark <robdclark@gmail.com>
*/
#include <linux/adreno-smmu-priv.h>
#include <linux/io-pgtable.h>
#include "msm_drv.h"
#include "msm_mmu.h"
struct msm_iommu {
struct msm_mmu base;
struct iommu_domain *domain;
atomic_t pagetables;
};
#define to_msm_iommu(x) container_of(x, struct msm_iommu, base)
struct msm_iommu_pagetable {
struct msm_mmu base;
struct msm_mmu *parent;
struct io_pgtable_ops *pgtbl_ops;
phys_addr_t ttbr;
u32 asid;
};
static struct msm_iommu_pagetable *to_pagetable(struct msm_mmu *mmu)
{
return container_of(mmu, struct msm_iommu_pagetable, base);
}
static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova,
size_t size)
{
struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
size_t unmapped = 0, requested = size;
/* Unmap the block one page at a time */
while (size) {
unmapped += ops->unmap(ops, iova, 4096, NULL);
iova += 4096;
size -= 4096;
}
iommu_flush_tlb_all(to_msm_iommu(pagetable->parent)->domain);
return (unmapped == requested) ? 0 : -EINVAL;
}
static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
struct sg_table *sgt, size_t len, int prot)
{
struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
struct scatterlist *sg;
size_t mapped = 0;
u64 addr = iova;
unsigned int i;
for_each_sg(sgt->sgl, sg, sgt->nents, i) {
size_t size = sg->length;
phys_addr_t phys = sg_phys(sg);
/* Map the block one page at a time */
while (size) {
if (ops->map(ops, addr, phys, 4096, prot, GFP_KERNEL)) {
msm_iommu_pagetable_unmap(mmu, iova, mapped);
return -EINVAL;
}
phys += 4096;
addr += 4096;
size -= 4096;
mapped += 4096;
}
}
return 0;
}
static void msm_iommu_pagetable_destroy(struct msm_mmu *mmu)
{
struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
struct msm_iommu *iommu = to_msm_iommu(pagetable->parent);
struct adreno_smmu_priv *adreno_smmu =
dev_get_drvdata(pagetable->parent->dev);
/*
* If this is the last attached pagetable for the parent,
* disable TTBR0 in the arm-smmu driver
*/
if (atomic_dec_return(&iommu->pagetables) == 0)
adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, NULL);
free_io_pgtable_ops(pagetable->pgtbl_ops);
kfree(pagetable);
}
int msm_iommu_pagetable_params(struct msm_mmu *mmu,
phys_addr_t *ttbr, int *asid)
{
struct msm_iommu_pagetable *pagetable;
if (mmu->type != MSM_MMU_IOMMU_PAGETABLE)
return -EINVAL;
pagetable = to_pagetable(mmu);
if (ttbr)
*ttbr = pagetable->ttbr;
if (asid)
*asid = pagetable->asid;
return 0;
}
static const struct msm_mmu_funcs pagetable_funcs = {
.map = msm_iommu_pagetable_map,
.unmap = msm_iommu_pagetable_unmap,
.destroy = msm_iommu_pagetable_destroy,
};
static void msm_iommu_tlb_flush_all(void *cookie)
{
}
static void msm_iommu_tlb_flush_walk(unsigned long iova, size_t size,
size_t granule, void *cookie)
{
}
static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
unsigned long iova, size_t granule, void *cookie)
{
}
static const struct iommu_flush_ops null_tlb_ops = {
.tlb_flush_all = msm_iommu_tlb_flush_all,
.tlb_flush_walk = msm_iommu_tlb_flush_walk,
.tlb_flush_leaf = msm_iommu_tlb_flush_walk,
.tlb_add_page = msm_iommu_tlb_add_page,
};
struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
{
struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(parent->dev);
struct msm_iommu *iommu = to_msm_iommu(parent);
struct msm_iommu_pagetable *pagetable;
const struct io_pgtable_cfg *ttbr1_cfg = NULL;
struct io_pgtable_cfg ttbr0_cfg;
int ret;
/* Get the pagetable configuration from the domain */
if (adreno_smmu->cookie)
ttbr1_cfg = adreno_smmu->get_ttbr1_cfg(adreno_smmu->cookie);
if (!ttbr1_cfg)
return ERR_PTR(-ENODEV);
pagetable = kzalloc(sizeof(*pagetable), GFP_KERNEL);
if (!pagetable)
return ERR_PTR(-ENOMEM);
msm_mmu_init(&pagetable->base, parent->dev, &pagetable_funcs,
MSM_MMU_IOMMU_PAGETABLE);
/* Clone the TTBR1 cfg as starting point for TTBR0 cfg: */
ttbr0_cfg = *ttbr1_cfg;
/* The incoming cfg will have the TTBR1 quirk enabled */
ttbr0_cfg.quirks &= ~IO_PGTABLE_QUIRK_ARM_TTBR1;
ttbr0_cfg.tlb = &null_tlb_ops;
pagetable->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1,
&ttbr0_cfg, iommu->domain);
if (!pagetable->pgtbl_ops) {
kfree(pagetable);
return ERR_PTR(-ENOMEM);
}
/*
* If this is the first pagetable that we've allocated, send it back to
* the arm-smmu driver as a trigger to set up TTBR0
*/
if (atomic_inc_return(&iommu->pagetables) == 1) {
ret = adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, &ttbr0_cfg);
if (ret) {
free_io_pgtable_ops(pagetable->pgtbl_ops);
kfree(pagetable);
return ERR_PTR(ret);
}
}
/* Needed later for TLB flush */
pagetable->parent = parent;
pagetable->ttbr = ttbr0_cfg.arm_lpae_s1_cfg.ttbr;
/*
* TODO we would like each set of page tables to have a unique ASID
* to optimize TLB invalidation. But iommu_flush_tlb_all() will
* end up flushing the ASID used for TTBR1 pagetables, which is not
* what we want. So for now just use the same ASID as TTBR1.
*/
pagetable->asid = 0;
return &pagetable->base;
}
static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
unsigned long iova, int flags, void *arg)
{
@ -36,6 +231,10 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
struct msm_iommu *iommu = to_msm_iommu(mmu);
size_t ret;
/* The arm-smmu driver expects the addresses to be sign extended */
if (iova & BIT_ULL(48))
iova |= GENMASK_ULL(63, 49);
ret = iommu_map_sgtable(iommu->domain, iova, sgt, prot);
WARN_ON(!ret);
@ -46,6 +245,9 @@ static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
if (iova & BIT_ULL(48))
iova |= GENMASK_ULL(63, 49);
iommu_unmap(iommu->domain, iova, len);
return 0;
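The sign extension applied in both paths is easy to illustrate in isolation; a minimal user-space sketch (not kernel code; GENMASK_ULL(63, 49) expands to 0xfffe000000000000):
#include <stdint.h>
#include <stdio.h>
/* Mirror of the IOVA sign extension above: bit 48 acts as the
 * sign bit of the SMMU's 49-bit VA space.
 */
static uint64_t sext_iova(uint64_t iova)
{
	if (iova & (1ULL << 48))
		iova |= 0xfffe000000000000ULL; /* GENMASK_ULL(63, 49) */
	return iova;
}
int main(void)
{
	/* bit 48 set, so the top bits get filled in */
	printf("%llx\n", (unsigned long long)sext_iova(0x0001000100000000ULL));
	/* prints ffff000100000000 */
	return 0;
}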
@ -78,9 +280,11 @@ struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
return ERR_PTR(-ENOMEM);
iommu->domain = domain;
msm_mmu_init(&iommu->base, dev, &funcs);
msm_mmu_init(&iommu->base, dev, &funcs, MSM_MMU_IOMMU);
iommu_set_fault_handler(domain, msm_fault_handler, iommu);
atomic_set(&iommu->pagetables, 0);
ret = iommu_attach_device(iommu->domain, dev);
if (ret) {
kfree(iommu);

View File

@ -17,18 +17,26 @@ struct msm_mmu_funcs {
void (*destroy)(struct msm_mmu *mmu);
};
enum msm_mmu_type {
MSM_MMU_GPUMMU,
MSM_MMU_IOMMU,
MSM_MMU_IOMMU_PAGETABLE,
};
struct msm_mmu {
const struct msm_mmu_funcs *funcs;
struct device *dev;
int (*handler)(void *arg, unsigned long iova, int flags);
void *arg;
enum msm_mmu_type type;
};
static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
const struct msm_mmu_funcs *funcs)
const struct msm_mmu_funcs *funcs, enum msm_mmu_type type)
{
mmu->dev = dev;
mmu->funcs = funcs;
mmu->type = type;
}
struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain);
@ -41,7 +49,13 @@ static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,
mmu->handler = handler;
}
struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent);
void msm_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base,
dma_addr_t *tran_error);
int msm_iommu_pagetable_params(struct msm_mmu *mmu, phys_addr_t *ttbr,
int *asid);
#endif /* __MSM_MMU_H__ */


@ -31,6 +31,7 @@ struct msm_rbmemptrs {
volatile uint32_t fence;
volatile struct msm_gpu_submit_stats stats[MSM_GPU_SUBMIT_STATS_COUNT];
volatile u64 ttbr0;
};
struct msm_ringbuffer {


@ -12,6 +12,8 @@ void msm_submitqueue_destroy(struct kref *kref)
struct msm_gpu_submitqueue *queue = container_of(kref,
struct msm_gpu_submitqueue, ref);
msm_file_private_put(queue->ctx);
kfree(queue);
}
@ -49,8 +51,10 @@ void msm_submitqueue_close(struct msm_file_private *ctx)
* No lock needed in close and there won't
* be any more user ioctls coming our way
*/
list_for_each_entry_safe(entry, tmp, &ctx->submitqueues, node)
list_for_each_entry_safe(entry, tmp, &ctx->submitqueues, node) {
list_del(&entry->node);
msm_submitqueue_put(entry);
}
}
int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
@ -81,6 +85,7 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
write_lock(&ctx->queuelock);
queue->ctx = msm_file_private_get(ctx);
queue->id = ctx->queueid++;
if (id)


@ -1167,6 +1167,7 @@ struct drm_device;
#define DP_MST_PHYSICAL_PORT_0 0
#define DP_MST_LOGICAL_PORT_0 8
#define DP_LINK_CONSTANT_N_VALUE 0x8000
#define DP_LINK_STATUS_SIZE 6
bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count);


@ -0,0 +1,36 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020 Google, Inc
*/
#ifndef __ADRENO_SMMU_PRIV_H
#define __ADRENO_SMMU_PRIV_H
#include <linux/io-pgtable.h>
/**
* struct adreno_smmu_priv - private interface between adreno-smmu and GPU
*
* @cookie: An opaque token provided by adreno-smmu and passed
* back into the callbacks
* @get_ttbr1_cfg: Get the TTBR1 config for the GPU's context bank
* @set_ttbr0_cfg: Set the TTBR0 config for the GPU's context bank. A
* NULL config disables TTBR0 translation, otherwise
* TTBR0 translation is enabled with the specified cfg
*
* The GPU driver (drm/msm) and adreno-smmu work together for controlling
* the GPU's SMMU instance. This is by necessity, as the GPU is directly
* updating the SMMU for context switches, while on the other hand we do
* not want to duplicate all of the initial setup logic from arm-smmu.
*
* This private interface is used for the two drivers to coordinate. The
* cookie and callback functions are populated when the GPU driver attaches
* its domain.
*/
struct adreno_smmu_priv {
const void *cookie;
const struct io_pgtable_cfg *(*get_ttbr1_cfg)(const void *cookie);
int (*set_ttbr0_cfg)(const void *cookie, const struct io_pgtable_cfg *cfg);
};
#endif /* __ADRENO_SMMU_PRIV_H */