Merge tag 'drm-msm-fixes-2023-07-27' of https://gitlab.freedesktop.org/drm/msm into drm-fixes

Fixes for v6.5-rc4

Display:
+ Correct the UBWC programming for decoder version 4.3 seen on SM8550
+ Add the missing flush and fetch bits for DMA4 and DMA5 SSPPs.
+ Drop the unused dpu_core_perf_data_bus_id enum
+ Drop the unused dsi_phy_14nm_17mA_regulators from the QCM2290 DSI PHY cfg.

GPU:
+ Fix warn splat for newer devices without revn
+ Remove name/revn for a690; for consistency we shouldn't populate these
  for newer devices, but it slipped through review
+ Fix a6xx gpu snapshot BINDLESS_DATA size (was listed in bytes
  instead of dwords, causing AHB faults on a6xx gen4/a660-family)
+ Disallow submit with fence id 0

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rob Clark <robdclark@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/CAF6AEGs9MwCSfiyv8i7yWAsJKYEzCDyzaTx=ujX80Y23rZd9RA@mail.gmail.com
commit 9a767faa94 (Dave Airlie, 2023-07-28 11:59:14 +10:00)
10 changed files with 54 additions and 28 deletions


@@ -89,7 +89,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit)
		 * since we've already mapped it once in
		 * submit_reloc()
		 */
-		if (WARN_ON(!ptr))
+		if (WARN_ON(IS_ERR_OR_NULL(ptr)))
			return;
 
		for (i = 0; i < dwords; i++) {


@@ -206,7 +206,7 @@ static const struct a6xx_shader_block {
	SHADER(A6XX_SP_LB_3_DATA, 0x800),
	SHADER(A6XX_SP_LB_4_DATA, 0x800),
	SHADER(A6XX_SP_LB_5_DATA, 0x200),
-	SHADER(A6XX_SP_CB_BINDLESS_DATA, 0x2000),
+	SHADER(A6XX_SP_CB_BINDLESS_DATA, 0x800),
	SHADER(A6XX_SP_CB_LEGACY_DATA, 0x280),
	SHADER(A6XX_SP_UAV_DATA, 0x80),
	SHADER(A6XX_SP_INST_TAG, 0x80),
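For reference, the corrected value follows directly from the unit change called out in the changelog: the SHADER() size column counts 32-bit dwords, and 0x2000 bytes / 4 bytes per dword = 0x800 dwords. The old byte-sized entry made the snapshot read four times the real block size, which lines up with the AHB faults reported on a6xx gen4/a660-family parts.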


@@ -369,8 +369,6 @@ static const struct adreno_info gpulist[] = {
		.hwcg = a640_hwcg,
	}, {
		.rev = ADRENO_REV(6, 9, 0, ANY_ID),
-		.revn = 690,
-		.name = "A690",
		.fw = {
			[ADRENO_FW_SQE] = "a660_sqe.fw",
			[ADRENO_FW_GMU] = "a690_gmu.bin",


@@ -149,7 +149,8 @@ bool adreno_cmp_rev(struct adreno_rev rev1, struct adreno_rev rev2);
 
 static inline bool adreno_is_revn(const struct adreno_gpu *gpu, uint32_t revn)
 {
-	WARN_ON_ONCE(!gpu->revn);
+	/* revn can be zero, but if not is set at same time as info */
+	WARN_ON_ONCE(!gpu->info);
 
	return gpu->revn == revn;
 }
@@ -161,14 +162,16 @@ static inline bool adreno_has_gmu_wrapper(const struct adreno_gpu *gpu)
 
 static inline bool adreno_is_a2xx(const struct adreno_gpu *gpu)
 {
-	WARN_ON_ONCE(!gpu->revn);
+	/* revn can be zero, but if not is set at same time as info */
+	WARN_ON_ONCE(!gpu->info);
 
	return (gpu->revn < 300);
 }
 
 static inline bool adreno_is_a20x(const struct adreno_gpu *gpu)
 {
-	WARN_ON_ONCE(!gpu->revn);
+	/* revn can be zero, but if not is set at same time as info */
+	WARN_ON_ONCE(!gpu->info);
 
	return (gpu->revn < 210);
 }
@@ -307,7 +310,8 @@ static inline int adreno_is_a680(const struct adreno_gpu *gpu)
 
 static inline int adreno_is_a690(const struct adreno_gpu *gpu)
 {
-	return adreno_is_revn(gpu, 690);
+	/* The order of args is important here to handle ANY_ID correctly */
+	return adreno_cmp_rev(ADRENO_REV(6, 9, 0, ANY_ID), gpu->rev);
 };
 
 /* check for a615, a616, a618, a619 or any derivatives */
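Since the gpulist entry above no longer carries .revn, adreno_is_a690() now matches on the full rev tuple, and the new comment flags the subtlety: wildcard fields are honored only in the first argument of the comparison. Below is a minimal standalone sketch of that one-sided matching, assuming ANY_ID is a reserved "match anything" value; it only models the comparison and is not the kernel's adreno_cmp_rev() implementation.

#include <assert.h>
#include <stdint.h>

#define ANY_ID 0xff

struct rev { uint8_t core, major, minor, patchid; };

static int field_match(uint8_t pattern, uint8_t id)
{
	/* Wildcards are only recognized in the pattern (first) argument. */
	return pattern == ANY_ID || pattern == id;
}

static int rev_match(struct rev pattern, struct rev id)
{
	return field_match(pattern.core, id.core) &&
	       field_match(pattern.major, id.major) &&
	       field_match(pattern.minor, id.minor) &&
	       field_match(pattern.patchid, id.patchid);
}

int main(void)
{
	struct rev a690_entry = { 6, 9, 0, ANY_ID };	/* table entry */
	struct rev probed = { 6, 9, 0, 1 };		/* concrete hardware rev */

	assert(rev_match(a690_entry, probed));	/* pattern first: matches */
	assert(!rev_match(probed, a690_entry));	/* swapped: ANY_ID is treated
						 * as a literal, so no match */
	return 0;
}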


@@ -14,19 +14,6 @@
 
 #define DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE 412500000
 
-/**
- * enum dpu_core_perf_data_bus_id - data bus identifier
- * @DPU_CORE_PERF_DATA_BUS_ID_MNOC: DPU/MNOC data bus
- * @DPU_CORE_PERF_DATA_BUS_ID_LLCC: MNOC/LLCC data bus
- * @DPU_CORE_PERF_DATA_BUS_ID_EBI: LLCC/EBI data bus
- */
-enum dpu_core_perf_data_bus_id {
-	DPU_CORE_PERF_DATA_BUS_ID_MNOC,
-	DPU_CORE_PERF_DATA_BUS_ID_LLCC,
-	DPU_CORE_PERF_DATA_BUS_ID_EBI,
-	DPU_CORE_PERF_DATA_BUS_ID_MAX,
-};
-
 /**
  * struct dpu_core_perf_params - definition of performance parameters
  * @max_per_pipe_ib: maximum instantaneous bandwidth request


@@ -51,7 +51,7 @@
 
 static const u32 fetch_tbl[SSPP_MAX] = {CTL_INVALID_BIT, 16, 17, 18, 19,
	CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, 0,
-	1, 2, 3, CTL_INVALID_BIT, CTL_INVALID_BIT};
+	1, 2, 3, 4, 5};
 
 static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count,
	enum dpu_lm lm)
@@ -198,6 +198,12 @@ static void dpu_hw_ctl_update_pending_flush_sspp(struct dpu_hw_ctl *ctx,
	case SSPP_DMA3:
		ctx->pending_flush_mask |= BIT(25);
		break;
+	case SSPP_DMA4:
+		ctx->pending_flush_mask |= BIT(13);
+		break;
+	case SSPP_DMA5:
+		ctx->pending_flush_mask |= BIT(14);
+		break;
	case SSPP_CURSOR0:
		ctx->pending_flush_mask |= BIT(22);
		break;


@@ -1087,8 +1087,6 @@ const struct msm_dsi_phy_cfg dsi_phy_14nm_8953_cfgs = {
 
 const struct msm_dsi_phy_cfg dsi_phy_14nm_2290_cfgs = {
	.has_phy_lane = true,
-	.regulator_data = dsi_phy_14nm_17mA_regulators,
-	.num_regulators = ARRAY_SIZE(dsi_phy_14nm_17mA_regulators),
	.ops = {
		.enable = dsi_14nm_phy_enable,
		.disable = dsi_14nm_phy_disable,


@@ -191,6 +191,12 @@ msm_fence_init(struct dma_fence *fence, struct msm_fence_context *fctx)
 
	f->fctx = fctx;
 
+	/*
+	 * Until this point, the fence was just some pre-allocated memory,
+	 * no-one should have taken a reference to it yet.
+	 */
+	WARN_ON(kref_read(&fence->refcount));
+
	dma_fence_init(&f->base, &msm_fence_ops, &fctx->spinlock,
		       fctx->context, ++fctx->last_fence);
 }


@@ -86,7 +86,19 @@ void __msm_gem_submit_destroy(struct kref *kref)
	}
 
	dma_fence_put(submit->user_fence);
-	dma_fence_put(submit->hw_fence);
+
+	/*
+	 * If the submit is freed before msm_job_run(), then hw_fence is
+	 * just some pre-allocated memory, not a reference counted fence.
+	 * Once the job runs and the hw_fence is initialized, it will
+	 * have a refcount of at least one, since the submit holds a ref
+	 * to the hw_fence.
+	 */
+	if (kref_read(&submit->hw_fence->refcount) == 0) {
+		kfree(submit->hw_fence);
+	} else {
+		dma_fence_put(submit->hw_fence);
+	}
 
	put_pid(submit->pid);
	msm_submitqueue_put(submit->queue);
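The refcount check here pairs with the WARN_ON() added to msm_fence_init() above: dma_fence_init() kref_init()s the refcount to 1, so a refcount of 0 can only mean the pre-allocated fence was never initialized because the job never ran. Below is a small standalone sketch of that lifecycle, for illustration only, with a plain int standing in for the kref.

#include <assert.h>
#include <stdlib.h>

/* Stand-in for the pre-allocated hw_fence; refcount plays the role of
 * the dma_fence kref. */
struct fake_fence {
	int refcount;
};

static void fence_init(struct fake_fence *f)
{
	assert(f->refcount == 0);	/* mirrors the WARN_ON() in msm_fence_init() */
	f->refcount = 1;		/* mirrors kref_init() inside dma_fence_init() */
}

static void submit_destroy(struct fake_fence *f)
{
	if (f->refcount == 0)
		free(f);		/* job never ran: plain memory, just free it */
	else if (--f->refcount == 0)
		free(f);		/* job ran: drop the submit's reference */
}

int main(void)
{
	/* Submit torn down before msm_job_run(): fence never initialized. */
	struct fake_fence *early = calloc(1, sizeof(*early));
	submit_destroy(early);

	/* Submit whose job ran: the fence was initialized, destroy drops
	 * the last reference. */
	struct fake_fence *ran = calloc(1, sizeof(*ran));
	fence_init(ran);
	submit_destroy(ran);

	return 0;
}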
@@ -889,7 +901,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
	 * after the job is armed
	 */
	if ((args->flags & MSM_SUBMIT_FENCE_SN_IN) &&
-	    idr_find(&queue->fence_idr, args->fence)) {
+	    (!args->fence || idr_find(&queue->fence_idr, args->fence))) {
		spin_unlock(&queue->idr_lock);
		idr_preload_end();
		ret = -EINVAL;


@@ -189,6 +189,7 @@ static int _msm_mdss_irq_domain_add(struct msm_mdss *msm_mdss)
 #define UBWC_2_0 0x20000000
 #define UBWC_3_0 0x30000000
 #define UBWC_4_0 0x40000000
+#define UBWC_4_3 0x40030000
 
 static void msm_mdss_setup_ubwc_dec_20(struct msm_mdss *msm_mdss)
 {
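A note on the new constant: judging purely from the four UBWC_* values above, the major revision sits in the top nibble and the minor revision in the 16-bit field below it, so 4.3 encodes as 0x40030000. The helper below is hypothetical (the driver just hardcodes the constants) and only checks that this inferred encoding reproduces the defines.

#include <assert.h>

/* Hypothetical encoding helper, inferred only from the UBWC_* defines;
 * not part of the driver. */
#define UBWC_DEC_VERSION(major, minor)	(((major) << 28) | ((minor) << 16))

int main(void)
{
	assert(UBWC_DEC_VERSION(2, 0) == 0x20000000);	/* UBWC_2_0 */
	assert(UBWC_DEC_VERSION(3, 0) == 0x30000000);	/* UBWC_3_0 */
	assert(UBWC_DEC_VERSION(4, 0) == 0x40000000);	/* UBWC_4_0 */
	assert(UBWC_DEC_VERSION(4, 3) == 0x40030000);	/* UBWC_4_3 */
	return 0;
}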
@@ -227,7 +228,10 @@ static void msm_mdss_setup_ubwc_dec_40(struct msm_mdss *msm_mdss)
		writel_relaxed(1, msm_mdss->mmio + UBWC_CTRL_2);
		writel_relaxed(0, msm_mdss->mmio + UBWC_PREDICTION_MODE);
	} else {
-		writel_relaxed(2, msm_mdss->mmio + UBWC_CTRL_2);
+		if (data->ubwc_dec_version == UBWC_4_3)
+			writel_relaxed(3, msm_mdss->mmio + UBWC_CTRL_2);
+		else
+			writel_relaxed(2, msm_mdss->mmio + UBWC_CTRL_2);
		writel_relaxed(1, msm_mdss->mmio + UBWC_PREDICTION_MODE);
	}
 }
@@ -271,6 +275,7 @@ static int msm_mdss_enable(struct msm_mdss *msm_mdss)
		msm_mdss_setup_ubwc_dec_30(msm_mdss);
		break;
	case UBWC_4_0:
+	case UBWC_4_3:
		msm_mdss_setup_ubwc_dec_40(msm_mdss);
		break;
	default:
@@ -569,6 +574,16 @@ static const struct msm_mdss_data sm8250_data = {
	.macrotile_mode = 1,
 };
 
+static const struct msm_mdss_data sm8550_data = {
+	.ubwc_version = UBWC_4_0,
+	.ubwc_dec_version = UBWC_4_3,
+	.ubwc_swizzle = 6,
+	.ubwc_static = 1,
+	/* TODO: highest_bank_bit = 2 for LP_DDR4 */
+	.highest_bank_bit = 3,
+	.macrotile_mode = 1,
+};
+
 static const struct of_device_id mdss_dt_match[] = {
	{ .compatible = "qcom,mdss" },
	{ .compatible = "qcom,msm8998-mdss" },
@@ -585,7 +600,7 @@ static const struct of_device_id mdss_dt_match[] = {
	{ .compatible = "qcom,sm8250-mdss", .data = &sm8250_data },
	{ .compatible = "qcom,sm8350-mdss", .data = &sm8250_data },
	{ .compatible = "qcom,sm8450-mdss", .data = &sm8250_data },
-	{ .compatible = "qcom,sm8550-mdss", .data = &sm8250_data },
+	{ .compatible = "qcom,sm8550-mdss", .data = &sm8550_data },
	{}
 };
 MODULE_DEVICE_TABLE(of, mdss_dt_match);