drm fixes for 6.5-rc2
Merge tag 'drm-fixes-2023-07-14-1' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "There were a bunch of fixes lined up for 2 weeks, so we have quite a
  few scattered fixes, mostly amdgpu and i915, but ttm has a bunch and
  nouveau makes an appearance. So a bit busier than usual for rc2, but
  nothing seems out of the ordinary.

  fbdev:
   - dma: Fix documented default preferred_bpp value

  ttm:
   - fix warning that we shouldn't mix && and ||
   - never consider pinned BOs for eviction&swap
   - Don't leak a resource on eviction error
   - Don't leak a resource on swapout move error
   - fix bulk_move corruption when adding a entry

  client:
   - Send hotplug event after registering a client

  dma-buf:
   - keep the signaling time of merged fences v3
   - fix an error pointer vs NULL bug

  sched:
   - wait for all deps in kill jobs
   - call set fence parent from scheduled

  i915:
   - Don't preserve dpll_hw_state for slave crtc in Bigjoiner
   - Consider OA buffer boundary when zeroing out reports
   - Remove dead code from gen8_pte_encode
   - Fix one wrong caching mode enum usage

  amdgpu:
   - SMU i2c locking fix
   - Fix a possible deadlock in process restoration for ROCm apps
   - Disable PCIe lane/speed switching on Intel platforms (the
     platforms don't support it)

  nouveau:
   - disp: fix HDMI on gt215+
   - disp/g94: enable HDMI
   - acr: Abort loading ACR if no firmware was found
   - bring back blit subchannel for pre nv50 GPUs
   - Fix drm_dp_remove_payload() invocation

  ivpu:
   - Fix VPU register access in irq disable
   - Clear specific interrupt status bits on C0

  bridge:
   - dw_hdmi: fix connector access for scdc
   - ti-sn65dsi86: Fix auxiliary bus lifetime

  panel:
   - simple: Add connector_type for innolux_at043tn24
   - simple: Add Powertip PH800480T013 drm_display_mode flags"

* tag 'drm-fixes-2023-07-14-1' of git://anongit.freedesktop.org/drm/drm: (32 commits)
  drm/nouveau: bring back blit subchannel for pre nv50 GPUs
  drm/nouveau/acr: Abort loading ACR if no firmware was found
  drm/amd: Align SMU11 SMU_MSG_OverridePcieParameters implementation with SMU13
  drm/amd: Move helper for dynamic speed switch check out of smu13
  drm/amd/pm: conditionally disable pcie lane/speed switching for SMU13
  drm/amd/pm: share the code around SMU13 pcie parameters update
  drm/amdgpu: avoid restore process run into dead loop.
  drm/amd/pm: fix smu i2c data read risk
  drm/nouveau/disp/g94: enable HDMI
  drm/nouveau/disp: fix HDMI on gt215+
  drm/client: Send hotplug event after registering a client
  drm/i915: Fix one wrong caching mode enum usage
  drm/i915: Remove dead code from gen8_pte_encode
  drm/i915/perf: Consider OA buffer boundary when zeroing out reports
  drm/i915: Don't preserve dpll_hw_state for slave crtc in Bigjoiner
  drm/ttm: never consider pinned BOs for eviction&swap
  drm/fbdev-dma: Fix documented default preferred_bpp value
  dma-buf: fix an error pointer vs NULL bug
  accel/ivpu: Clear specific interrupt status bits on C0
  accel/ivpu: Fix VPU register access in irq disable
  ...
commit 3a97a2993e

48 changed files with 318 additions and 266 deletions
@@ -75,6 +75,7 @@ struct ivpu_wa_table {
 	bool punit_disabled;
 	bool clear_runtime_mem;
 	bool d3hot_after_power_off;
+	bool interrupt_clear_with_0;
 };
 
 struct ivpu_hw_info;

@@ -101,6 +101,9 @@ static void ivpu_hw_wa_init(struct ivpu_device *vdev)
 	vdev->wa.punit_disabled = ivpu_is_fpga(vdev);
 	vdev->wa.clear_runtime_mem = false;
 	vdev->wa.d3hot_after_power_off = true;
+
+	if (ivpu_device_id(vdev) == PCI_DEVICE_ID_MTL && ivpu_revision(vdev) < 4)
+		vdev->wa.interrupt_clear_with_0 = true;
 }
 
 static void ivpu_hw_timeouts_init(struct ivpu_device *vdev)

@@ -885,7 +888,7 @@ static void ivpu_hw_mtl_irq_disable(struct ivpu_device *vdev)
 	REGB_WR32(MTL_BUTTRESS_GLOBAL_INT_MASK, 0x1);
 	REGB_WR32(MTL_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_DISABLE_MASK);
 	REGV_WR64(MTL_VPU_HOST_SS_ICB_ENABLE_0, 0x0ull);
-	REGB_WR32(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, 0x0);
+	REGV_WR32(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, 0x0);
 }
 
 static void ivpu_hw_mtl_irq_wdt_nce_handler(struct ivpu_device *vdev)

@@ -973,12 +976,15 @@ static u32 ivpu_hw_mtl_irqb_handler(struct ivpu_device *vdev, int irq)
 		schedule_recovery = true;
 	}
 
-	/*
-	 * Clear local interrupt status by writing 0 to all bits.
-	 * This must be done after interrupts are cleared at the source.
-	 * Writing 1 triggers an interrupt, so we can't perform read update write.
-	 */
-	REGB_WR32(MTL_BUTTRESS_INTERRUPT_STAT, 0x0);
+	/* This must be done after interrupts are cleared at the source. */
+	if (IVPU_WA(interrupt_clear_with_0))
+		/*
+		 * Writing 1 triggers an interrupt, so we can't perform read update write.
+		 * Clear local interrupt status by writing 0 to all bits.
+		 */
+		REGB_WR32(MTL_BUTTRESS_INTERRUPT_STAT, 0x0);
+	else
+		REGB_WR32(MTL_BUTTRESS_INTERRUPT_STAT, status);
 
 	/* Re-enable global interrupt */
 	REGB_WR32(MTL_BUTTRESS_GLOBAL_INT_MASK, 0x0);

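The workaround above distinguishes two interrupt-status conventions: later steppings clear the bits that were handled (write-1-to-clear), while the early MTL revisions trigger a spurious interrupt on any 1-write and must be cleared by writing 0 to the whole register. A minimal userspace sketch of the two clearing strategies (the register is simulated; this is not the driver's REGB_WR32 macro):

#include <stdint.h>
#include <stdio.h>

static uint32_t irq_status; /* simulated interrupt status register */

/* W1C: hardware clears the bits written as 1, later interrupts stay pending. */
static void clear_w1c(uint32_t handled)
{
	irq_status &= ~handled;
}

/* Buggy-stepping variant: a 1-write fires an interrupt, so the only safe
 * operation is writing 0 to all bits; nothing is lost here because the
 * sources were already cleared upstream. */
static void clear_with_0(void)
{
	irq_status = 0;
}

int main(void)
{
	irq_status = 0x5;                           /* two sources pending */
	clear_w1c(0x1);                             /* handle one source */
	printf("after W1C: %#x\n", irq_status);     /* 0x4 still pending */

	irq_status = 0x5;
	clear_with_0();
	printf("after write-0: %#x\n", irq_status); /* 0x0 */
	return 0;
}
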
@@ -66,18 +66,36 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
 {
 	struct dma_fence_array *result;
 	struct dma_fence *tmp, **array;
+	ktime_t timestamp;
 	unsigned int i;
 	size_t count;
 
 	count = 0;
+	timestamp = ns_to_ktime(0);
 	for (i = 0; i < num_fences; ++i) {
-		dma_fence_unwrap_for_each(tmp, &iter[i], fences[i])
-			if (!dma_fence_is_signaled(tmp))
+		dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) {
+			if (!dma_fence_is_signaled(tmp)) {
 				++count;
+			} else if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT,
+					    &tmp->flags)) {
+				if (ktime_after(tmp->timestamp, timestamp))
+					timestamp = tmp->timestamp;
+			} else {
+				/*
+				 * Use the current time if the fence is
+				 * currently signaling.
+				 */
+				timestamp = ktime_get();
+			}
+		}
 	}
 
+	/*
+	 * If we couldn't find a pending fence just return a private signaled
+	 * fence with the timestamp of the last signaled one.
+	 */
 	if (count == 0)
-		return dma_fence_get_stub();
+		return dma_fence_allocate_private_stub(timestamp);
 
 	array = kmalloc_array(count, sizeof(*array), GFP_KERNEL);
 	if (!array)

@@ -138,7 +156,7 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
 	} while (tmp);
 
 	if (count == 0) {
-		tmp = dma_fence_get_stub();
+		tmp = dma_fence_allocate_private_stub(ktime_get());
 		goto return_tmp;
 	}
 

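The merge path above keeps the latest signaling time seen among the already-signaled inputs, so the stub returned for an all-signaled merge carries a meaningful timestamp instead of "now". The selection rule in isolation, as a standalone sketch (plain integers stand in for ktime_t; the kernel's ktime helpers are assumed, not reimplemented):

#include <stdint.h>
#include <stdio.h>

struct fence {
	int signaled;      /* 1 if the fence already signaled */
	int has_timestamp; /* 1 once the timestamp is valid */
	int64_t timestamp; /* signaling time in ns */
};

/* Timestamp a merged stub should carry when every input has signaled:
 * the latest valid timestamp, or "now" if one is still mid-signaling. */
static int64_t merged_timestamp(const struct fence *f, int n, int64_t now)
{
	int64_t ts = 0;

	for (int i = 0; i < n; i++) {
		if (!f[i].signaled)
			continue;
		if (f[i].has_timestamp) {
			if (f[i].timestamp > ts)
				ts = f[i].timestamp;
		} else {
			ts = now; /* fence is signaling right now */
		}
	}
	return ts;
}

int main(void)
{
	struct fence f[] = { { 1, 1, 100 }, { 1, 1, 250 } };

	printf("stub timestamp: %lld\n",
	       (long long)merged_timestamp(f, 2, 999)); /* prints 250 */
	return 0;
}
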
@@ -150,16 +150,17 @@ EXPORT_SYMBOL(dma_fence_get_stub);
 
 /**
  * dma_fence_allocate_private_stub - return a private, signaled fence
+ * @timestamp: timestamp when the fence was signaled
  *
  * Return a newly allocated and signaled stub fence.
  */
-struct dma_fence *dma_fence_allocate_private_stub(void)
+struct dma_fence *dma_fence_allocate_private_stub(ktime_t timestamp)
 {
 	struct dma_fence *fence;
 
 	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
 	if (fence == NULL)
-		return ERR_PTR(-ENOMEM);
+		return NULL;
 
 	dma_fence_init(fence,
 		       &dma_fence_stub_ops,

@@ -169,7 +170,7 @@ struct dma_fence *dma_fence_allocate_private_stub(void)
 	set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
 		&fence->flags);
 
-	dma_fence_signal(fence);
+	dma_fence_signal_timestamp(fence, timestamp);
 
 	return fence;
 }

@@ -1296,6 +1296,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
 int amdgpu_device_pci_reset(struct amdgpu_device *adev);
 bool amdgpu_device_need_post(struct amdgpu_device *adev);
+bool amdgpu_device_pcie_dynamic_switching_supported(void);
 bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev);
 bool amdgpu_device_aspm_support_quirk(void);
 

@@ -2881,6 +2881,9 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
 		if (!attachment->is_mapped)
 			continue;
 
+		if (attachment->bo_va->base.bo->tbo.pin_count)
+			continue;
+
 		kfd_mem_dmaunmap_attachment(mem, attachment);
 		ret = update_gpuvm_pte(mem, attachment, &sync_obj);
 		if (ret) {

@@ -1458,6 +1458,25 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev)
 	return true;
 }
 
+/*
+ * Intel hosts such as Raptor Lake and Sapphire Rapids don't support dynamic
+ * speed switching. Until we have confirmation from Intel that a specific host
+ * supports it, it's safer that we keep it disabled for all.
+ *
+ * https://edc.intel.com/content/www/us/en/design/products/platforms/details/raptor-lake-s/13th-generation-core-processors-datasheet-volume-1-of-2/005/pci-express-support/
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/2663
+ */
+bool amdgpu_device_pcie_dynamic_switching_supported(void)
+{
+#if IS_ENABLED(CONFIG_X86)
+	struct cpuinfo_x86 *c = &cpu_data(0);
+
+	if (c->x86_vendor == X86_VENDOR_INTEL)
+		return false;
+#endif
+	return true;
+}
+
 /**
  * amdgpu_device_should_use_aspm - check if the device should program ASPM
  *

@@ -295,5 +295,9 @@ int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu,
 					uint32_t *size,
 					uint32_t pptable_id);
 
+int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
+				     uint32_t pcie_gen_cap,
+				     uint32_t pcie_width_cap);
+
 #endif
 #endif

@@ -2113,7 +2113,6 @@ static int arcturus_i2c_xfer(struct i2c_adapter *i2c_adap,
 	}
 	mutex_lock(&adev->pm.mutex);
 	r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
-	mutex_unlock(&adev->pm.mutex);
 	if (r)
 		goto fail;
 
@@ -2130,6 +2129,7 @@ static int arcturus_i2c_xfer(struct i2c_adapter *i2c_adap,
 	}
 	r = num_msgs;
 fail:
+	mutex_unlock(&adev->pm.mutex);
 	kfree(req);
 	return r;
 }

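The four SMU i2c hunks in this series all make the same change: the pm.mutex used to be dropped immediately after the table update, but the caller still reads the reply data out of the shared request buffer afterwards, so a concurrent transfer could overwrite it. Moving the single mutex_unlock() to the common "fail:" exit keeps the buffer protected for the whole transaction. A minimal pthread sketch of that shape (illustrative names, not the SMU API; compile with -pthread):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static char shared_reply[32]; /* stands in for the shared SMU table */

static int do_transfer(const char *msg, char *out, int fail)
{
	int r = 0;

	pthread_mutex_lock(&table_lock);
	snprintf(shared_reply, sizeof(shared_reply), "reply:%s", msg);
	if (fail) {
		r = -1;
		goto fail;
	}

	/* Read the reply while still holding the lock, so another
	 * transfer cannot overwrite shared_reply underneath us. */
	memcpy(out, shared_reply, sizeof(shared_reply));
fail:
	pthread_mutex_unlock(&table_lock); /* single unlock on every path */
	return r;
}

int main(void)
{
	char buf[32];

	if (!do_transfer("ping", buf, 0))
		printf("%s\n", buf);
	return 0;
}
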
@@ -3021,7 +3021,6 @@ static int navi10_i2c_xfer(struct i2c_adapter *i2c_adap,
 	}
 	mutex_lock(&adev->pm.mutex);
 	r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
-	mutex_unlock(&adev->pm.mutex);
 	if (r)
 		goto fail;
 
@@ -3038,6 +3037,7 @@ static int navi10_i2c_xfer(struct i2c_adapter *i2c_adap,
 	}
 	r = num_msgs;
 fail:
+	mutex_unlock(&adev->pm.mutex);
 	kfree(req);
 	return r;
 }

@@ -2077,89 +2077,36 @@ static int sienna_cichlid_display_disable_memory_clock_switch(struct smu_context
 	return ret;
 }
 
-static void sienna_cichlid_get_override_pcie_settings(struct smu_context *smu,
-						      uint32_t *gen_speed_override,
-						      uint32_t *lane_width_override)
-{
-	struct amdgpu_device *adev = smu->adev;
-
-	*gen_speed_override = 0xff;
-	*lane_width_override = 0xff;
-
-	switch (adev->pdev->device) {
-	case 0x73A0:
-	case 0x73A1:
-	case 0x73A2:
-	case 0x73A3:
-	case 0x73AB:
-	case 0x73AE:
-		/* Bit 7:0: PCIE lane width, 1 to 7 corresponds is x1 to x32 */
-		*lane_width_override = 6;
-		break;
-	case 0x73E0:
-	case 0x73E1:
-	case 0x73E3:
-		*lane_width_override = 4;
-		break;
-	case 0x7420:
-	case 0x7421:
-	case 0x7422:
-	case 0x7423:
-	case 0x7424:
-		*lane_width_override = 3;
-		break;
-	default:
-		break;
-	}
-}
-
-#define MAX(a, b) ((a) > (b) ? (a) : (b))
-
 static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
 						 uint32_t pcie_gen_cap,
 						 uint32_t pcie_width_cap)
 {
 	struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
 	struct smu_11_0_pcie_table *pcie_table = &dpm_context->dpm_tables.pcie_table;
-	uint32_t gen_speed_override, lane_width_override;
-	uint8_t *table_member1, *table_member2;
-	uint32_t min_gen_speed, max_gen_speed;
-	uint32_t min_lane_width, max_lane_width;
-	uint32_t smu_pcie_arg;
+	u32 smu_pcie_arg;
 	int ret, i;
 
-	GET_PPTABLE_MEMBER(PcieGenSpeed, &table_member1);
-	GET_PPTABLE_MEMBER(PcieLaneCount, &table_member2);
+	/* PCIE gen speed and lane width override */
+	if (!amdgpu_device_pcie_dynamic_switching_supported()) {
+		if (pcie_table->pcie_gen[NUM_LINK_LEVELS - 1] < pcie_gen_cap)
+			pcie_gen_cap = pcie_table->pcie_gen[NUM_LINK_LEVELS - 1];
 
-	sienna_cichlid_get_override_pcie_settings(smu,
-						  &gen_speed_override,
-						  &lane_width_override);
+		if (pcie_table->pcie_lane[NUM_LINK_LEVELS - 1] < pcie_width_cap)
+			pcie_width_cap = pcie_table->pcie_lane[NUM_LINK_LEVELS - 1];
 
-	/* PCIE gen speed override */
-	if (gen_speed_override != 0xff) {
-		min_gen_speed = MIN(pcie_gen_cap, gen_speed_override);
-		max_gen_speed = MIN(pcie_gen_cap, gen_speed_override);
+		/* Force all levels to use the same settings */
+		for (i = 0; i < NUM_LINK_LEVELS; i++) {
+			pcie_table->pcie_gen[i] = pcie_gen_cap;
+			pcie_table->pcie_lane[i] = pcie_width_cap;
+		}
 	} else {
-		min_gen_speed = MAX(0, table_member1[0]);
-		max_gen_speed = MIN(pcie_gen_cap, table_member1[1]);
-		min_gen_speed = min_gen_speed > max_gen_speed ?
-				max_gen_speed : min_gen_speed;
+		for (i = 0; i < NUM_LINK_LEVELS; i++) {
+			if (pcie_table->pcie_gen[i] > pcie_gen_cap)
+				pcie_table->pcie_gen[i] = pcie_gen_cap;
+			if (pcie_table->pcie_lane[i] > pcie_width_cap)
+				pcie_table->pcie_lane[i] = pcie_width_cap;
+		}
 	}
-	pcie_table->pcie_gen[0] = min_gen_speed;
-	pcie_table->pcie_gen[1] = max_gen_speed;
-
-	/* PCIE lane width override */
-	if (lane_width_override != 0xff) {
-		min_lane_width = MIN(pcie_width_cap, lane_width_override);
-		max_lane_width = MIN(pcie_width_cap, lane_width_override);
-	} else {
-		min_lane_width = MAX(1, table_member2[0]);
-		max_lane_width = MIN(pcie_width_cap, table_member2[1]);
-		min_lane_width = min_lane_width > max_lane_width ?
-				 max_lane_width : min_lane_width;
-	}
-	pcie_table->pcie_lane[0] = min_lane_width;
-	pcie_table->pcie_lane[1] = max_lane_width;
 
 	for (i = 0; i < NUM_LINK_LEVELS; i++) {
 		smu_pcie_arg = (i << 16 |

@@ -3842,7 +3789,6 @@ static int sienna_cichlid_i2c_xfer(struct i2c_adapter *i2c_adap,
 	}
 	mutex_lock(&adev->pm.mutex);
 	r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
-	mutex_unlock(&adev->pm.mutex);
 	if (r)
 		goto fail;
 
@@ -3859,6 +3805,7 @@ static int sienna_cichlid_i2c_xfer(struct i2c_adapter *i2c_adap,
 	}
 	r = num_msgs;
 fail:
+	mutex_unlock(&adev->pm.mutex);
 	kfree(req);
 	return r;
 }

@@ -1525,7 +1525,6 @@ static int aldebaran_i2c_xfer(struct i2c_adapter *i2c_adap,
 	}
 	mutex_lock(&adev->pm.mutex);
 	r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
-	mutex_unlock(&adev->pm.mutex);
 	if (r)
 		goto fail;
 
@@ -1542,6 +1541,7 @@ static int aldebaran_i2c_xfer(struct i2c_adapter *i2c_adap,
 	}
 	r = num_msgs;
 fail:
+	mutex_unlock(&adev->pm.mutex);
 	kfree(req);
 	return r;
 }

@@ -2424,3 +2424,51 @@ int smu_v13_0_mode1_reset(struct smu_context *smu)
 
 	return ret;
 }
+
+int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
+				     uint32_t pcie_gen_cap,
+				     uint32_t pcie_width_cap)
+{
+	struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+	struct smu_13_0_pcie_table *pcie_table =
+				&dpm_context->dpm_tables.pcie_table;
+	int num_of_levels = pcie_table->num_of_link_levels;
+	uint32_t smu_pcie_arg;
+	int ret, i;
+
+	if (!amdgpu_device_pcie_dynamic_switching_supported()) {
+		if (pcie_table->pcie_gen[num_of_levels - 1] < pcie_gen_cap)
+			pcie_gen_cap = pcie_table->pcie_gen[num_of_levels - 1];
+
+		if (pcie_table->pcie_lane[num_of_levels - 1] < pcie_width_cap)
+			pcie_width_cap = pcie_table->pcie_lane[num_of_levels - 1];
+
+		/* Force all levels to use the same settings */
+		for (i = 0; i < num_of_levels; i++) {
+			pcie_table->pcie_gen[i] = pcie_gen_cap;
+			pcie_table->pcie_lane[i] = pcie_width_cap;
+		}
+	} else {
+		for (i = 0; i < num_of_levels; i++) {
+			if (pcie_table->pcie_gen[i] > pcie_gen_cap)
+				pcie_table->pcie_gen[i] = pcie_gen_cap;
+			if (pcie_table->pcie_lane[i] > pcie_width_cap)
+				pcie_table->pcie_lane[i] = pcie_width_cap;
+		}
+	}
+
+	for (i = 0; i < num_of_levels; i++) {
+		smu_pcie_arg = i << 16;
+		smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+		smu_pcie_arg |= pcie_table->pcie_lane[i];
+
+		ret = smu_cmn_send_smc_msg_with_param(smu,
+						      SMU_MSG_OverridePcieParameters,
+						      smu_pcie_arg,
+						      NULL);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}

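Each link level is programmed with one 32-bit SMU_MSG_OverridePcieParameters argument: bits 31:16 carry the level index, bits 15:8 the gen-speed code, bits 7:0 the lane-width code. A tiny standalone encoder for that layout (field positions taken from the code above; the example values use the width encoding noted in the sienna_cichlid comment, where 6 means x16):

#include <stdint.h>
#include <stdio.h>

/* Pack one PCIe DPM level the way smu_v13_0_update_pcie_parameters() does:
 * level in bits 31:16, gen speed in bits 15:8, lane width code in 7:0. */
static uint32_t smu_pcie_arg(uint32_t level, uint32_t gen, uint32_t lanes)
{
	return (level << 16) | (gen << 8) | lanes;
}

int main(void)
{
	/* level 1, gen code 3, width code 6 (x16) -> 0x00010306 */
	printf("%#010x\n", smu_pcie_arg(1, 3, 6));
	return 0;
}
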
@@ -1645,37 +1645,6 @@ static int smu_v13_0_0_force_clk_levels(struct smu_context *smu,
 	return ret;
 }
 
-static int smu_v13_0_0_update_pcie_parameters(struct smu_context *smu,
-					      uint32_t pcie_gen_cap,
-					      uint32_t pcie_width_cap)
-{
-	struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
-	struct smu_13_0_pcie_table *pcie_table =
-				&dpm_context->dpm_tables.pcie_table;
-	uint32_t smu_pcie_arg;
-	int ret, i;
-
-	for (i = 0; i < pcie_table->num_of_link_levels; i++) {
-		if (pcie_table->pcie_gen[i] > pcie_gen_cap)
-			pcie_table->pcie_gen[i] = pcie_gen_cap;
-		if (pcie_table->pcie_lane[i] > pcie_width_cap)
-			pcie_table->pcie_lane[i] = pcie_width_cap;
-
-		smu_pcie_arg = i << 16;
-		smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
-		smu_pcie_arg |= pcie_table->pcie_lane[i];
-
-		ret = smu_cmn_send_smc_msg_with_param(smu,
-						      SMU_MSG_OverridePcieParameters,
-						      smu_pcie_arg,
-						      NULL);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
 static const struct smu_temperature_range smu13_thermal_policy[] = {
 	{-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
 	{ 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000},

@@ -2320,7 +2289,6 @@ static int smu_v13_0_0_i2c_xfer(struct i2c_adapter *i2c_adap,
 	}
 	mutex_lock(&adev->pm.mutex);
 	r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
-	mutex_unlock(&adev->pm.mutex);
 	if (r)
 		goto fail;
 
@@ -2337,6 +2305,7 @@ static int smu_v13_0_0_i2c_xfer(struct i2c_adapter *i2c_adap,
 	}
 	r = num_msgs;
 fail:
+	mutex_unlock(&adev->pm.mutex);
 	kfree(req);
 	return r;
 }

@@ -2654,7 +2623,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
 	.feature_is_enabled = smu_cmn_feature_is_enabled,
 	.print_clk_levels = smu_v13_0_0_print_clk_levels,
 	.force_clk_levels = smu_v13_0_0_force_clk_levels,
-	.update_pcie_parameters = smu_v13_0_0_update_pcie_parameters,
+	.update_pcie_parameters = smu_v13_0_update_pcie_parameters,
 	.get_thermal_temperature_range = smu_v13_0_0_get_thermal_temperature_range,
 	.register_irq_handler = smu_v13_0_register_irq_handler,
 	.enable_thermal_alert = smu_v13_0_enable_thermal_alert,

@@ -1763,7 +1763,6 @@ static int smu_v13_0_6_i2c_xfer(struct i2c_adapter *i2c_adap,
 	}
 	mutex_lock(&adev->pm.mutex);
 	r = smu_v13_0_6_request_i2c_xfer(smu, req);
-	mutex_unlock(&adev->pm.mutex);
 	if (r)
 		goto fail;
 
@@ -1780,6 +1779,7 @@ static int smu_v13_0_6_i2c_xfer(struct i2c_adapter *i2c_adap,
 	}
 	r = num_msgs;
 fail:
+	mutex_unlock(&adev->pm.mutex);
 	kfree(req);
 	return r;
 }

@@ -1635,37 +1635,6 @@ static int smu_v13_0_7_force_clk_levels(struct smu_context *smu,
 	return ret;
 }
 
-static int smu_v13_0_7_update_pcie_parameters(struct smu_context *smu,
-					      uint32_t pcie_gen_cap,
-					      uint32_t pcie_width_cap)
-{
-	struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
-	struct smu_13_0_pcie_table *pcie_table =
-				&dpm_context->dpm_tables.pcie_table;
-	uint32_t smu_pcie_arg;
-	int ret, i;
-
-	for (i = 0; i < pcie_table->num_of_link_levels; i++) {
-		if (pcie_table->pcie_gen[i] > pcie_gen_cap)
-			pcie_table->pcie_gen[i] = pcie_gen_cap;
-		if (pcie_table->pcie_lane[i] > pcie_width_cap)
-			pcie_table->pcie_lane[i] = pcie_width_cap;
-
-		smu_pcie_arg = i << 16;
-		smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
-		smu_pcie_arg |= pcie_table->pcie_lane[i];
-
-		ret = smu_cmn_send_smc_msg_with_param(smu,
-						      SMU_MSG_OverridePcieParameters,
-						      smu_pcie_arg,
-						      NULL);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
 static const struct smu_temperature_range smu13_thermal_policy[] =
 {
 	{-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},

@@ -2234,7 +2203,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
 	.feature_is_enabled = smu_cmn_feature_is_enabled,
 	.print_clk_levels = smu_v13_0_7_print_clk_levels,
 	.force_clk_levels = smu_v13_0_7_force_clk_levels,
-	.update_pcie_parameters = smu_v13_0_7_update_pcie_parameters,
+	.update_pcie_parameters = smu_v13_0_update_pcie_parameters,
 	.get_thermal_temperature_range = smu_v13_0_7_get_thermal_temperature_range,
 	.register_irq_handler = smu_v13_0_register_irq_handler,
 	.enable_thermal_alert = smu_v13_0_enable_thermal_alert,

@@ -209,10 +209,6 @@ void armada_fbdev_setup(struct drm_device *dev)
 		goto err_drm_client_init;
 	}
 
-	ret = armada_fbdev_client_hotplug(&fbh->client);
-	if (ret)
-		drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
-
 	drm_client_register(&fbh->client);
 
 	return;

@@ -1426,9 +1426,9 @@ void dw_hdmi_set_high_tmds_clock_ratio(struct dw_hdmi *hdmi,
 	/* Control for TMDS Bit Period/TMDS Clock-Period Ratio */
 	if (dw_hdmi_support_scdc(hdmi, display)) {
 		if (mtmdsclock > HDMI14_MAX_TMDSCLK)
-			drm_scdc_set_high_tmds_clock_ratio(&hdmi->connector, 1);
+			drm_scdc_set_high_tmds_clock_ratio(hdmi->curr_conn, 1);
 		else
-			drm_scdc_set_high_tmds_clock_ratio(&hdmi->connector, 0);
+			drm_scdc_set_high_tmds_clock_ratio(hdmi->curr_conn, 0);
 	}
 }
 EXPORT_SYMBOL_GPL(dw_hdmi_set_high_tmds_clock_ratio);

@@ -2116,7 +2116,7 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
 				   min_t(u8, bytes, SCDC_MIN_SOURCE_VERSION));
 
 			/* Enabled Scrambling in the Sink */
-			drm_scdc_set_scrambling(&hdmi->connector, 1);
+			drm_scdc_set_scrambling(hdmi->curr_conn, 1);
 
 			/*
 			 * To activate the scrambler feature, you must ensure

@@ -2132,7 +2132,7 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
 			hdmi_writeb(hdmi, 0, HDMI_FC_SCRAMBLER_CTRL);
 			hdmi_writeb(hdmi, (u8)~HDMI_MC_SWRSTZ_TMDSSWRST_REQ,
 				    HDMI_MC_SWRSTZ);
-			drm_scdc_set_scrambling(&hdmi->connector, 0);
+			drm_scdc_set_scrambling(hdmi->curr_conn, 0);
 		}
 	}
 

@@ -3553,6 +3553,7 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
 	hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID
 			 | DRM_BRIDGE_OP_HPD;
 	hdmi->bridge.interlace_allowed = true;
+	hdmi->bridge.ddc = hdmi->ddc;
 #ifdef CONFIG_OF
 	hdmi->bridge.of_node = pdev->dev.of_node;
 #endif

@@ -170,10 +170,10 @@
  * @pwm_refclk_freq: Cache for the reference clock input to the PWM.
  */
 struct ti_sn65dsi86 {
-	struct auxiliary_device		bridge_aux;
-	struct auxiliary_device		gpio_aux;
-	struct auxiliary_device		aux_aux;
-	struct auxiliary_device		pwm_aux;
+	struct auxiliary_device		*bridge_aux;
+	struct auxiliary_device		*gpio_aux;
+	struct auxiliary_device		*aux_aux;
+	struct auxiliary_device		*pwm_aux;
 
 	struct device			*dev;
 	struct regmap			*regmap;

@@ -468,27 +468,34 @@ static void ti_sn65dsi86_delete_aux(void *data)
 	auxiliary_device_delete(data);
 }
 
-/*
- * AUX bus docs say that a non-NULL release is mandatory, but it makes no
- * sense for the model used here where all of the aux devices are allocated
- * in the single shared structure. We'll use this noop as a workaround.
- */
-static void ti_sn65dsi86_noop(struct device *dev) {}
+static void ti_sn65dsi86_aux_device_release(struct device *dev)
+{
+	struct auxiliary_device *aux = container_of(dev, struct auxiliary_device, dev);
+
+	kfree(aux);
+}
 
 static int ti_sn65dsi86_add_aux_device(struct ti_sn65dsi86 *pdata,
-				       struct auxiliary_device *aux,
+				       struct auxiliary_device **aux_out,
 				       const char *name)
 {
 	struct device *dev = pdata->dev;
+	struct auxiliary_device *aux;
 	int ret;
 
+	aux = kzalloc(sizeof(*aux), GFP_KERNEL);
+	if (!aux)
+		return -ENOMEM;
+
 	aux->name = name;
 	aux->dev.parent = dev;
-	aux->dev.release = ti_sn65dsi86_noop;
+	aux->dev.release = ti_sn65dsi86_aux_device_release;
 	device_set_of_node_from_dev(&aux->dev, dev);
 	ret = auxiliary_device_init(aux);
-	if (ret)
+	if (ret) {
+		kfree(aux);
 		return ret;
+	}
 	ret = devm_add_action_or_reset(dev, ti_sn65dsi86_uninit_aux, aux);
 	if (ret)
 		return ret;

@@ -497,6 +504,8 @@ static int ti_sn65dsi86_add_aux_device(struct ti_sn65dsi86 *pdata,
 	if (ret)
 		return ret;
 	ret = devm_add_action_or_reset(dev, ti_sn65dsi86_delete_aux, aux);
+	if (!ret)
+		*aux_out = aux;
 
 	return ret;
 }

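The bridge fix above swaps embedded auxiliary_device members for individually allocated ones so that each device's release callback can free its own memory: a release hook must not be a no-op, because the child device can outlive the parent structure once something else holds a reference to it. The container_of()-based release pattern in miniature, as a userspace sketch (refcounting elided; the final "put" is simulated by calling release directly):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct device {
	void (*release)(struct device *dev);
};

struct aux_device {
	const char *name;
	struct device dev;
};

/* Free the container when the embedded device's last reference drops. */
static void aux_device_release(struct device *dev)
{
	struct aux_device *aux = (struct aux_device *)
		((char *)dev - offsetof(struct aux_device, dev));

	printf("releasing %s\n", aux->name);
	free(aux);
}

int main(void)
{
	struct aux_device *aux = calloc(1, sizeof(*aux));

	if (!aux)
		return 1;
	aux->name = "bridge";
	aux->dev.release = aux_device_release;

	/* Stand-in for the final put: the core calls release, not us. */
	aux->dev.release(&aux->dev);
	return 0;
}
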
@@ -122,13 +122,34 @@ EXPORT_SYMBOL(drm_client_init);
 * drm_client_register() it is no longer permissible to call drm_client_release()
 * directly (outside the unregister callback), instead cleanup will happen
 * automatically on driver unload.
+ *
+ * Registering a client generates a hotplug event that allows the client
+ * to set up its display from pre-existing outputs. The client must have
+ * initialized its state to able to handle the hotplug event successfully.
 */
 void drm_client_register(struct drm_client_dev *client)
 {
 	struct drm_device *dev = client->dev;
+	int ret;
 
 	mutex_lock(&dev->clientlist_mutex);
 	list_add(&client->list, &dev->clientlist);
+
+	if (client->funcs && client->funcs->hotplug) {
+		/*
+		 * Perform an initial hotplug event to pick up the
+		 * display configuration for the client. This step
+		 * has to be performed *after* registering the client
+		 * in the list of clients, or a concurrent hotplug
+		 * event might be lost; leaving the display off.
+		 *
+		 * Hold the clientlist_mutex as for a regular hotplug
+		 * event.
+		 */
+		ret = client->funcs->hotplug(client);
+		if (ret)
+			drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
+	}
 	mutex_unlock(&dev->clientlist_mutex);
 }
 EXPORT_SYMBOL(drm_client_register);

@@ -217,7 +217,7 @@ static const struct drm_client_funcs drm_fbdev_dma_client_funcs = {
 * drm_fbdev_dma_setup() - Setup fbdev emulation for GEM DMA helpers
 * @dev: DRM device
 * @preferred_bpp: Preferred bits per pixel for the device.
- *                @dev->mode_config.preferred_depth is used if this is zero.
+ *                32 is used if this is zero.
 *
 * This function sets up fbdev emulation for GEM DMA drivers that support
 * dumb buffers with a virtual address and that can be mmap'ed.

@@ -252,10 +252,6 @@ void drm_fbdev_dma_setup(struct drm_device *dev, unsigned int preferred_bpp)
 		goto err_drm_client_init;
 	}
 
-	ret = drm_fbdev_dma_client_hotplug(&fb_helper->client);
-	if (ret)
-		drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
-
 	drm_client_register(&fb_helper->client);
 
 	return;

@@ -339,10 +339,6 @@ void drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
 		goto err_drm_client_init;
 	}
 
-	ret = drm_fbdev_generic_client_hotplug(&fb_helper->client);
-	if (ret)
-		drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
-
 	drm_client_register(&fb_helper->client);
 
 	return;

@@ -353,10 +353,10 @@ EXPORT_SYMBOL(drm_syncobj_replace_fence);
 */
static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
 {
-	struct dma_fence *fence = dma_fence_allocate_private_stub();
+	struct dma_fence *fence = dma_fence_allocate_private_stub(ktime_get());
 
-	if (IS_ERR(fence))
-		return PTR_ERR(fence);
+	if (!fence)
+		return -ENOMEM;
 
 	drm_syncobj_replace_fence(syncobj, fence);
 	dma_fence_put(fence);

@@ -215,10 +215,6 @@ void exynos_drm_fbdev_setup(struct drm_device *dev)
 	if (ret)
 		goto err_drm_client_init;
 
-	ret = exynos_drm_fbdev_client_hotplug(&fb_helper->client);
-	if (ret)
-		drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
-
 	drm_client_register(&fb_helper->client);
 
 	return;

@@ -328,10 +328,6 @@ void psb_fbdev_setup(struct drm_psb_private *dev_priv)
 		goto err_drm_fb_helper_unprepare;
 	}
 
-	ret = psb_fbdev_client_hotplug(&fb_helper->client);
-	if (ret)
-		drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
-
 	drm_client_register(&fb_helper->client);
 
 	return;

@@ -4564,7 +4564,6 @@ copy_bigjoiner_crtc_state_modeset(struct intel_atomic_state *state,
 	saved_state->uapi = slave_crtc_state->uapi;
 	saved_state->scaler_state = slave_crtc_state->scaler_state;
 	saved_state->shared_dpll = slave_crtc_state->shared_dpll;
-	saved_state->dpll_hw_state = slave_crtc_state->dpll_hw_state;
 	saved_state->crc_enabled = slave_crtc_state->crc_enabled;
 
 	intel_crtc_free_hw_state(slave_crtc_state);

@@ -37,9 +37,6 @@ static u64 gen8_pte_encode(dma_addr_t addr,
 	if (unlikely(flags & PTE_READ_ONLY))
 		pte &= ~GEN8_PAGE_RW;
 
-	if (flags & PTE_LM)
-		pte |= GEN12_PPGTT_PTE_LM;
-
 	/*
 	 * For pre-gen12 platforms pat_index is the same as enum
 	 * i915_cache_level, so the switch-case here is still valid.

@@ -670,7 +670,7 @@ __vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size)
 	if (IS_ERR(obj))
 		return ERR_CAST(obj);
 
-	i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);
+	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
 
 	vma = i915_vma_instance(obj, vm, NULL);
 	if (IS_ERR(vma)) {

@@ -868,8 +868,17 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
 			oa_report_id_clear(stream, report32);
 			oa_timestamp_clear(stream, report32);
 		} else {
+			u8 *oa_buf_end = stream->oa_buffer.vaddr +
+					 OA_BUFFER_SIZE;
+			u32 part = oa_buf_end - (u8 *)report32;
+
 			/* Zero out the entire report */
-			memset(report32, 0, report_size);
+			if (report_size <= part) {
+				memset(report32, 0, report_size);
+			} else {
+				memset(report32, 0, part);
+				memset(oa_buf_base, 0, report_size - part);
+			}
 		}
 	}
 

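The OA fix handles a report that straddles the end of the circular OA buffer: the first piece is zeroed up to the buffer end and the remainder from the buffer base. The split-memset in isolation, on a plain ring buffer with hypothetical sizes:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BUF_SIZE 64

static uint8_t ring[BUF_SIZE];

/* Zero report_size bytes starting at offset, wrapping at BUF_SIZE. */
static void zero_report(size_t offset, size_t report_size)
{
	size_t part = BUF_SIZE - offset; /* bytes until the buffer end */

	if (report_size <= part) {
		memset(ring + offset, 0, report_size);
	} else {
		memset(ring + offset, 0, part);      /* piece at the end */
		memset(ring, 0, report_size - part); /* wrapped piece */
	}
}

int main(void)
{
	memset(ring, 0xff, sizeof(ring));
	zero_report(56, 16); /* 8 bytes at the end + 8 bytes at the start */
	printf("ring[0]=%#x ring[55]=%#x\n", ring[0], ring[55]); /* 0, 0xff */
	return 0;
}
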
@@ -246,10 +246,6 @@ void msm_fbdev_setup(struct drm_device *dev)
 		goto err_drm_fb_helper_unprepare;
 	}
 
-	ret = msm_fbdev_client_hotplug(&helper->client);
-	if (ret)
-		drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
-
 	drm_client_register(&helper->client);
 
 	return;

@@ -910,15 +910,19 @@ nv50_msto_prepare(struct drm_atomic_state *state,
 	struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
 	struct nv50_mstc *mstc = msto->mstc;
 	struct nv50_mstm *mstm = mstc->mstm;
-	struct drm_dp_mst_atomic_payload *payload;
+	struct drm_dp_mst_topology_state *old_mst_state;
+	struct drm_dp_mst_atomic_payload *payload, *old_payload;
 
 	NV_ATOMIC(drm, "%s: msto prepare\n", msto->encoder.name);
 
+	old_mst_state = drm_atomic_get_old_mst_topology_state(state, mgr);
+
 	payload = drm_atomic_get_mst_payload_state(mst_state, mstc->port);
+	old_payload = drm_atomic_get_mst_payload_state(old_mst_state, mstc->port);
 
 	// TODO: Figure out if we want to do a better job of handling VCPI allocation failures here?
 	if (msto->disabled) {
-		drm_dp_remove_payload(mgr, mst_state, payload, payload);
+		drm_dp_remove_payload(mgr, mst_state, old_payload, payload);
 
 		nvif_outp_dp_mst_vcpi(&mstm->outp->outp, msto->head->base.index, 0, 0, 0, 0);
 	} else {

@@ -90,6 +90,7 @@ nouveau_channel_del(struct nouveau_channel **pchan)
 		if (cli)
 			nouveau_svmm_part(chan->vmm->svmm, chan->inst);
 
+		nvif_object_dtor(&chan->blit);
 		nvif_object_dtor(&chan->nvsw);
 		nvif_object_dtor(&chan->gart);
 		nvif_object_dtor(&chan->vram);

@@ -53,6 +53,7 @@ struct nouveau_channel {
 	u32 user_put;
 
 	struct nvif_object user;
+	struct nvif_object blit;
 
 	struct nvif_event kill;
 	atomic_t killed;

@@ -375,15 +375,29 @@ nouveau_accel_gr_init(struct nouveau_drm *drm)
 	ret = nvif_object_ctor(&drm->channel->user, "drmNvsw",
 			       NVDRM_NVSW, nouveau_abi16_swclass(drm),
 			       NULL, 0, &drm->channel->nvsw);
+
+	if (ret == 0 && device->info.chipset >= 0x11) {
+		ret = nvif_object_ctor(&drm->channel->user, "drmBlit",
+				       0x005f, 0x009f,
+				       NULL, 0, &drm->channel->blit);
+	}
+
 	if (ret == 0) {
 		struct nvif_push *push = drm->channel->chan.push;
-		ret = PUSH_WAIT(push, 2);
-		if (ret == 0)
+		ret = PUSH_WAIT(push, 8);
+		if (ret == 0) {
+			if (device->info.chipset >= 0x11) {
+				PUSH_NVSQ(push, NV05F, 0x0000, drm->channel->blit.handle);
+				PUSH_NVSQ(push, NV09F, 0x0120, 0,
+						      0x0124, 1,
+						      0x0128, 2);
+			}
 			PUSH_NVSQ(push, NV_SW, 0x0000, drm->channel->nvsw.handle);
+		}
 	}
 
 	if (ret) {
-		NV_ERROR(drm, "failed to allocate sw class, %d\n", ret);
+		NV_ERROR(drm, "failed to allocate sw or blit class, %d\n", ret);
 		nouveau_accel_gr_fini(drm);
 		return;
 	}

@@ -295,6 +295,7 @@ g94_sor = {
 	.clock = nv50_sor_clock,
 	.war_2 = g94_sor_war_2,
 	.war_3 = g94_sor_war_3,
+	.hdmi = &g84_sor_hdmi,
 	.dp = &g94_sor_dp,
 };
 

@@ -125,7 +125,7 @@ gt215_sor_hdmi_infoframe_avi(struct nvkm_ior *ior, int head, void *data, u32 size)
 	pack_hdmi_infoframe(&avi, data, size);
 
 	nvkm_mask(device, 0x61c520 + soff, 0x00000001, 0x00000000);
-	if (size)
+	if (!size)
 		return;
 
 	nvkm_wr32(device, 0x61c528 + soff, avi.header);

@@ -224,7 +224,7 @@ nvkm_acr_oneinit(struct nvkm_subdev *subdev)
 	u64 falcons;
 	int ret, i;
 
-	if (list_empty(&acr->hsfw)) {
+	if (list_empty(&acr->hsfw) || !acr->func || !acr->func->wpr_layout) {
 		nvkm_debug(subdev, "No HSFW(s)\n");
 		nvkm_acr_cleanup(acr);
 		return 0;

@@ -318,10 +318,6 @@ void omap_fbdev_setup(struct drm_device *dev)
 
 	INIT_WORK(&fbdev->work, pan_worker);
 
-	ret = omap_fbdev_client_hotplug(&helper->client);
-	if (ret)
-		drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
-
 	drm_client_register(&helper->client);
 
 	return;

@@ -2178,6 +2178,7 @@ static const struct panel_desc innolux_at043tn24 = {
 		.height = 54,
 	},
 	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+	.connector_type = DRM_MODE_CONNECTOR_DPI,
 	.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
 };
 

@@ -3202,6 +3203,7 @@ static const struct drm_display_mode powertip_ph800480t013_idf02_mode = {
 	.vsync_start = 480 + 49,
 	.vsync_end = 480 + 49 + 2,
 	.vtotal = 480 + 49 + 2 + 22,
+	.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
 };
 
 static const struct panel_desc powertip_ph800480t013_idf02 = {

@@ -383,10 +383,6 @@ void radeon_fbdev_setup(struct radeon_device *rdev)
 		goto err_drm_client_init;
 	}
 
-	ret = radeon_fbdev_client_hotplug(&fb_helper->client);
-	if (ret)
-		drm_dbg_kms(rdev->ddev, "client hotplug ret=%d\n", ret);
-
 	drm_client_register(&fb_helper->client);
 
 	return;

@@ -176,16 +176,32 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
 {
 	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
 						 finish_cb);
-	int r;
+	unsigned long index;
 
 	dma_fence_put(f);
 
 	/* Wait for all dependencies to avoid data corruptions */
-	while (!xa_empty(&job->dependencies)) {
-		f = xa_erase(&job->dependencies, job->last_dependency++);
-		r = dma_fence_add_callback(f, &job->finish_cb,
-					   drm_sched_entity_kill_jobs_cb);
-		if (!r)
+	xa_for_each(&job->dependencies, index, f) {
+		struct drm_sched_fence *s_fence = to_drm_sched_fence(f);
+
+		if (s_fence && f == &s_fence->scheduled) {
+			/* The dependencies array had a reference on the scheduled
+			 * fence, and the finished fence refcount might have
+			 * dropped to zero. Use dma_fence_get_rcu() so we get
+			 * a NULL fence in that case.
+			 */
+			f = dma_fence_get_rcu(&s_fence->finished);
+
+			/* Now that we have a reference on the finished fence,
+			 * we can release the reference the dependencies array
+			 * had on the scheduled fence.
+			 */
+			dma_fence_put(&s_fence->scheduled);
+		}
+
+		xa_erase(&job->dependencies, index);
+		if (f && !dma_fence_add_callback(f, &job->finish_cb,
+						 drm_sched_entity_kill_jobs_cb))
 			return;
 
 		dma_fence_put(f);

@@ -415,8 +431,17 @@ static struct dma_fence *
 drm_sched_job_dependency(struct drm_sched_job *job,
 			 struct drm_sched_entity *entity)
 {
-	if (!xa_empty(&job->dependencies))
-		return xa_erase(&job->dependencies, job->last_dependency++);
+	struct dma_fence *f;
+
+	/* We keep the fence around, so we can iterate over all dependencies
+	 * in drm_sched_entity_kill_jobs_cb() to ensure all deps are signaled
+	 * before killing the job.
+	 */
+	f = xa_load(&job->dependencies, job->last_dependency);
+	if (f) {
+		job->last_dependency++;
+		return dma_fence_get(f);
+	}
 
 	if (job->sched->ops->prepare_job)
 		return job->sched->ops->prepare_job(job, entity);

@@ -48,8 +48,32 @@ static void __exit drm_sched_fence_slab_fini(void)
 	kmem_cache_destroy(sched_fence_slab);
 }
 
-void drm_sched_fence_scheduled(struct drm_sched_fence *fence)
+static void drm_sched_fence_set_parent(struct drm_sched_fence *s_fence,
+				       struct dma_fence *fence)
 {
+	/*
+	 * smp_store_release() to ensure another thread racing us
+	 * in drm_sched_fence_set_deadline_finished() sees the
+	 * fence's parent set before test_bit()
+	 */
+	smp_store_release(&s_fence->parent, dma_fence_get(fence));
+	if (test_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT,
+		     &s_fence->finished.flags))
+		dma_fence_set_deadline(fence, s_fence->deadline);
+}
+
+void drm_sched_fence_scheduled(struct drm_sched_fence *fence,
+			       struct dma_fence *parent)
+{
+	/* Set the parent before signaling the scheduled fence, such that,
+	 * any waiter expecting the parent to be filled after the job has
+	 * been scheduled (which is the case for drivers delegating waits
+	 * to some firmware) doesn't have to busy wait for parent to show
+	 * up.
+	 */
+	if (!IS_ERR_OR_NULL(parent))
+		drm_sched_fence_set_parent(fence, parent);
+
 	dma_fence_signal(&fence->scheduled);
 }
 

@@ -181,20 +205,6 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f)
 }
 EXPORT_SYMBOL(to_drm_sched_fence);
 
-void drm_sched_fence_set_parent(struct drm_sched_fence *s_fence,
-				struct dma_fence *fence)
-{
-	/*
-	 * smp_store_release() to ensure another thread racing us
-	 * in drm_sched_fence_set_deadline_finished() sees the
-	 * fence's parent set before test_bit()
-	 */
-	smp_store_release(&s_fence->parent, dma_fence_get(fence));
-	if (test_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT,
-		     &s_fence->finished.flags))
-		dma_fence_set_deadline(fence, s_fence->deadline);
-}
-
 struct drm_sched_fence *drm_sched_fence_alloc(struct drm_sched_entity *entity,
 					      void *owner)
 {

@@ -1043,10 +1043,9 @@ static int drm_sched_main(void *param)
 		trace_drm_run_job(sched_job, entity);
 		fence = sched->ops->run_job(sched_job);
 		complete_all(&entity->entity_idle);
-		drm_sched_fence_scheduled(s_fence);
+		drm_sched_fence_scheduled(s_fence, fence);
 
 		if (!IS_ERR_OR_NULL(fence)) {
-			drm_sched_fence_set_parent(s_fence, fence);
 			/* Drop for original kref_init of the fence */
 			dma_fence_put(fence);

@@ -225,10 +225,6 @@ void tegra_fbdev_setup(struct drm_device *dev)
 	if (ret)
 		goto err_drm_client_init;
 
-	ret = tegra_fbdev_client_hotplug(&helper->client);
-	if (ret)
-		drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
-
 	drm_client_register(&helper->client);
 
 	return;

@@ -458,18 +458,18 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
 		goto out;
 	}
 
-bounce:
-	ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
-	if (ret == -EMULTIHOP) {
+	do {
+		ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
+		if (ret != -EMULTIHOP)
+			break;
+
 		ret = ttm_bo_bounce_temp_buffer(bo, &evict_mem, ctx, &hop);
-		if (ret) {
-			if (ret != -ERESTARTSYS && ret != -EINTR)
-				pr_err("Buffer eviction failed\n");
-			ttm_resource_free(bo, &evict_mem);
-			goto out;
-		}
-		/* try and move to final place now. */
-		goto bounce;
-	}
+	} while (!ret);
+
+	if (ret) {
+		ttm_resource_free(bo, &evict_mem);
+		if (ret != -ERESTARTSYS && ret != -EINTR)
+			pr_err("Buffer eviction failed\n");
+	}
 out:
 	return ret;

@@ -517,6 +517,12 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
 {
 	bool ret = false;
 
+	if (bo->pin_count) {
+		*locked = false;
+		*busy = false;
+		return false;
+	}
+
 	if (bo->base.resv == ctx->resv) {
 		dma_resv_assert_held(bo->base.resv);
 		if (ctx->allow_res_evict)

@@ -1167,6 +1173,7 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
 		ret = ttm_bo_handle_move_mem(bo, evict_mem, true, &ctx, &hop);
 		if (unlikely(ret != 0)) {
 			WARN(ret == -EMULTIHOP, "Unexpected multihop in swaput - likely driver bug.\n");
+			ttm_resource_free(bo, &evict_mem);
 			goto out;
 		}
 	}

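The eviction rework above replaces a backwards "goto bounce" with a do/while that retries the move as long as the driver reports a multihop (-EMULTIHOP) and the temporary bounce move keeps succeeding; any other result leaves the loop, and a single error path frees the resource. The control flow in isolation, with a simulated driver that needs two hops (error codes illustrative):

#include <stdio.h>

#define EMULTIHOP 72

static int hops_left = 2; /* simulated driver: two bounce hops needed */

static int handle_move(void)
{
	return hops_left ? -EMULTIHOP : 0;
}

static int bounce_temp_buffer(void)
{
	hops_left--;
	return 0; /* bounce succeeded */
}

int main(void)
{
	int ret;

	do {
		ret = handle_move();
		if (ret != -EMULTIHOP)
			break;         /* done, or a real error */
		ret = bounce_temp_buffer();
	} while (!ret);                /* stop if the bounce itself failed */

	printf("final ret=%d\n", ret); /* 0 after two hops */
	return 0;
}
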
@@ -86,6 +86,8 @@ static void ttm_lru_bulk_move_pos_tail(struct ttm_lru_bulk_move_pos *pos,
 				       struct ttm_resource *res)
 {
 	if (pos->last != res) {
+		if (pos->first == res)
+			pos->first = list_next_entry(res, lru);
 		list_move(&res->lru, &pos->last->lru);
 		pos->last = res;
 	}

@@ -111,7 +113,8 @@ static void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk,
 {
 	struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res);
 
-	if (unlikely(pos->first == res && pos->last == res)) {
+	if (unlikely(WARN_ON(!pos->first || !pos->last) ||
+		     (pos->first == res && pos->last == res))) {
 		pos->first = NULL;
 		pos->last = NULL;
 	} else if (pos->first == res) {

@@ -583,15 +583,14 @@ void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
 bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
 int drm_sched_entity_error(struct drm_sched_entity *entity);
 
-void drm_sched_fence_set_parent(struct drm_sched_fence *s_fence,
-				struct dma_fence *fence);
 struct drm_sched_fence *drm_sched_fence_alloc(
 	struct drm_sched_entity *s_entity, void *owner);
 void drm_sched_fence_init(struct drm_sched_fence *fence,
 			  struct drm_sched_entity *entity);
 void drm_sched_fence_free(struct drm_sched_fence *fence);
 
-void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
+void drm_sched_fence_scheduled(struct drm_sched_fence *fence,
+			       struct dma_fence *parent);
 void drm_sched_fence_finished(struct drm_sched_fence *fence, int result);
 
 unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);

@@ -606,7 +606,7 @@ static inline signed long dma_fence_wait(struct dma_fence *fence, bool intr)
 void dma_fence_set_deadline(struct dma_fence *fence, ktime_t deadline);
 
 struct dma_fence *dma_fence_get_stub(void);
-struct dma_fence *dma_fence_allocate_private_stub(void);
+struct dma_fence *dma_fence_allocate_private_stub(ktime_t timestamp);
 u64 dma_fence_context_alloc(unsigned num);
 
 extern const struct dma_fence_ops dma_fence_array_ops;