Merge tag 'amd-drm-next-6.9-2024-02-19' of https://gitlab.freedesktop.org/agd5f/linux into drm-next

amd-drm-next-6.9-2024-02-19:

amdgpu:
- ATHUB 4.1 support
- EEPROM support updates
- RAS updates
- LSDMA 7.0 support
- JPEG DPG support
- IH 7.0 support
- HDP 7.0 support
- VCN 5.0 support
- Misc display fixes
- Retimer fixes
- DCN 3.5 fixes
- VCN 4.x fixes
- PSR fixes
- PSP 14.0 support
- VA_RESERVED cleanup
- SMU 13.0.6 updates
- NBIO 7.11 updates
- SDMA 6.1 updates
- MMHUB 3.3 updates
- Suspend/resume fixes
- DMUB updates

amdkfd:
- Trap handler enhancements
- Fix cache size reporting
- Relocate the trap handler

radeon:
- fix typo in print statement

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240219214810.4911-1-alexander.deucher@amd.com
Committed by Dave Airlie on 2024-02-22 10:08:22 +10:00, commit 40d47c5fb4.
117 changed files with 21298 additions and 409 deletions.

@@ -98,7 +98,7 @@ amdgpu-y += \
vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o arct_reg_init.o mxgpu_nv.o \
nbio_v7_2.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o soc21.o \
sienna_cichlid.o smu_v13_0_10.o nbio_v4_3.o hdp_v6_0.o nbio_v7_7.o hdp_v5_2.o lsdma_v6_0.o \
nbio_v7_9.o aqua_vanjaram.o nbio_v7_11.o
nbio_v7_9.o aqua_vanjaram.o nbio_v7_11.o lsdma_v7_0.o hdp_v7_0.o
# add DF block
amdgpu-y += \
@@ -132,7 +132,8 @@ amdgpu-y += \
vega20_ih.o \
navi10_ih.o \
ih_v6_0.o \
ih_v6_1.o
ih_v6_1.o \
ih_v7_0.o
# add PSP block
amdgpu-y += \
@@ -143,7 +144,8 @@ amdgpu-y += \
psp_v11_0_8.o \
psp_v12_0.o \
psp_v13_0.o \
psp_v13_0_4.o
psp_v13_0_4.o \
psp_v14_0.o
# add DCE block
amdgpu-y += \
@@ -208,6 +210,7 @@ amdgpu-y += \
vcn_v4_0.o \
vcn_v4_0_3.o \
vcn_v4_0_5.o \
vcn_v5_0_0.o \
amdgpu_jpeg.o \
jpeg_v1_0.o \
jpeg_v2_0.o \
@@ -215,7 +218,8 @@ amdgpu-y += \
jpeg_v3_0.o \
jpeg_v4_0.o \
jpeg_v4_0_3.o \
jpeg_v4_0_5.o
jpeg_v4_0_5.o \
jpeg_v5_0_0.o
# add VPE block
amdgpu-y += \
@@ -233,7 +237,8 @@ amdgpu-y += \
athub_v1_0.o \
athub_v2_0.o \
athub_v2_1.o \
athub_v3_0.o
athub_v3_0.o \
athub_v4_1_0.o
# add SMUIO block
amdgpu-y += \


@@ -196,8 +196,9 @@ extern int amdgpu_smu_pptable_id;
extern uint amdgpu_dc_feature_mask;
extern uint amdgpu_dc_debug_mask;
extern uint amdgpu_dc_visual_confirm;
extern uint amdgpu_dm_abm_level;
extern int amdgpu_dm_abm_level;
extern int amdgpu_backlight;
extern int amdgpu_damage_clips;
extern struct amdgpu_mgpu_info mgpu_info;
extern int amdgpu_ras_enable;
extern uint amdgpu_ras_mask;
@@ -1095,6 +1096,7 @@ struct amdgpu_device {
long sdma_timeout;
long video_timeout;
long compute_timeout;
long psp_timeout;
uint64_t unique_id;
uint64_t df_perfmon_config_assign_mask[AMDGPU_MAX_DF_PERFMONS];
@@ -1551,9 +1553,11 @@ static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev,
#if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
void amdgpu_choose_low_power_state(struct amdgpu_device *adev);
#else
static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
static inline void amdgpu_choose_low_power_state(struct amdgpu_device *adev) { }
#endif
#if defined(CONFIG_DRM_AMD_DC)


@@ -1519,4 +1519,19 @@ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
#endif /* CONFIG_AMD_PMC */
}
/**
* amdgpu_choose_low_power_state
*
* @adev: amdgpu_device pointer
*
* Choose the target low power state for the GPU
*/
void amdgpu_choose_low_power_state(struct amdgpu_device *adev)
{
if (amdgpu_acpi_is_s0ix_active(adev))
adev->in_s0ix = true;
else if (amdgpu_acpi_is_s3_active(adev))
adev->in_s3 = true;
}
#endif /* CONFIG_SUSPEND */


@@ -28,9 +28,8 @@
uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev)
{
uint64_t addr = adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT;
uint64_t addr = AMDGPU_VA_RESERVED_CSA_START(adev);
addr -= AMDGPU_VA_RESERVED_CSA_SIZE;
addr = amdgpu_gmc_sign_extend(addr);
return addr;


@@ -4529,13 +4529,15 @@ int amdgpu_device_prepare(struct drm_device *dev)
struct amdgpu_device *adev = drm_to_adev(dev);
int i, r;
amdgpu_choose_low_power_state(adev);
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
/* Evict the majority of BOs before starting suspend sequence */
r = amdgpu_device_evict_resources(adev);
if (r)
return r;
goto unprepare;
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.valid)
@@ -4544,10 +4546,15 @@ int amdgpu_device_prepare(struct drm_device *dev)
continue;
r = adev->ip_blocks[i].version->funcs->prepare_suspend((void *)adev);
if (r)
return r;
goto unprepare;
}
return 0;
unprepare:
adev->in_s0ix = adev->in_s3 = false;
return r;
}
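The net effect of the two hunks above: the target low power state is latched
up front (s0ix is tried before S3), and both flags are cleared again if
eviction or any IP block's prepare_suspend() fails. A minimal standalone
sketch of that control flow, with the ACPI checks stubbed to hypothetical
values rather than the driver's real detection:

#include <stdbool.h>
#include <stdio.h>

struct dev_state { bool in_s0ix, in_s3; };

static bool acpi_s0ix_active(void) { return true; }  /* stub */
static bool acpi_s3_active(void)   { return false; } /* stub */

static void choose_low_power_state(struct dev_state *d)
{
        /* same precedence as amdgpu_choose_low_power_state() above */
        if (acpi_s0ix_active())
                d->in_s0ix = true;
        else if (acpi_s3_active())
                d->in_s3 = true;
}

static int prepare(struct dev_state *d, bool step_fails)
{
        choose_low_power_state(d);
        if (step_fails) {
                /* the "unprepare" path: forget the latched state */
                d->in_s0ix = d->in_s3 = false;
                return -1;
        }
        return 0;
}

int main(void)
{
        struct dev_state d = { 0 };

        prepare(&d, false);
        printf("s0ix=%d s3=%d\n", d.in_s0ix, d.in_s3);
        return 0;
}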
/**
@@ -4584,7 +4591,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
cancel_delayed_work_sync(&adev->delayed_init_work);
flush_delayed_work(&adev->gfx.gfx_off_delay_work);
amdgpu_ras_suspend(adev);


@@ -64,17 +64,20 @@
#include "hdp_v5_0.h"
#include "hdp_v5_2.h"
#include "hdp_v6_0.h"
#include "hdp_v7_0.h"
#include "nv.h"
#include "soc21.h"
#include "navi10_ih.h"
#include "ih_v6_0.h"
#include "ih_v6_1.h"
#include "ih_v7_0.h"
#include "gfx_v10_0.h"
#include "gfx_v11_0.h"
#include "sdma_v5_0.h"
#include "sdma_v5_2.h"
#include "sdma_v6_0.h"
#include "lsdma_v6_0.h"
#include "lsdma_v7_0.h"
#include "vcn_v2_0.h"
#include "jpeg_v2_0.h"
#include "vcn_v3_0.h"
@@ -93,6 +96,8 @@
#include "smuio_v13_0.h"
#include "smuio_v13_0_3.h"
#include "smuio_v13_0_6.h"
#include "vcn_v5_0_0.h"
#include "jpeg_v5_0_0.h"
#include "amdgpu_vpe.h"
@@ -1767,6 +1772,9 @@ static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(6, 1, 0):
amdgpu_device_ip_block_add(adev, &ih_v6_1_ip_block);
break;
case IP_VERSION(7, 0, 0):
amdgpu_device_ip_block_add(adev, &ih_v7_0_ip_block);
break;
default:
dev_err(adev->dev,
"Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n",
@@ -1816,11 +1824,16 @@ static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(13, 0, 10):
case IP_VERSION(13, 0, 11):
case IP_VERSION(14, 0, 0):
case IP_VERSION(14, 0, 1):
amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
break;
case IP_VERSION(13, 0, 4):
amdgpu_device_ip_block_add(adev, &psp_v13_0_4_ip_block);
break;
case IP_VERSION(14, 0, 2):
case IP_VERSION(14, 0, 3):
amdgpu_device_ip_block_add(adev, &psp_v14_0_ip_block);
break;
default:
dev_err(adev->dev,
"Failed to add psp ip block(MP0_HWIP:0x%x)\n",
@@ -2037,6 +2050,7 @@ static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(6, 0, 2):
case IP_VERSION(6, 0, 3):
case IP_VERSION(6, 1, 0):
case IP_VERSION(6, 1, 1):
amdgpu_device_ip_block_add(adev, &sdma_v6_0_ip_block);
break;
default:
@@ -2126,6 +2140,10 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &vcn_v4_0_5_ip_block);
amdgpu_device_ip_block_add(adev, &jpeg_v4_0_5_ip_block);
break;
case IP_VERSION(5, 0, 0):
amdgpu_device_ip_block_add(adev, &vcn_v5_0_0_ip_block);
amdgpu_device_ip_block_add(adev, &jpeg_v5_0_0_ip_block);
break;
default:
dev_err(adev->dev,
"Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n",
@@ -2497,6 +2515,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->nbio.hdp_flush_reg = &nbio_v7_9_hdp_flush_reg;
break;
case IP_VERSION(7, 11, 0):
case IP_VERSION(7, 11, 1):
adev->nbio.funcs = &nbio_v7_11_funcs;
adev->nbio.hdp_flush_reg = &nbio_v7_11_hdp_flush_reg;
break;
@@ -2564,6 +2583,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(6, 1, 0):
adev->hdp.funcs = &hdp_v6_0_funcs;
break;
case IP_VERSION(7, 0, 0):
adev->hdp.funcs = &hdp_v7_0_funcs;
break;
default:
break;
}
@@ -2628,6 +2650,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(13, 0, 6):
case IP_VERSION(13, 0, 8):
case IP_VERSION(14, 0, 0):
case IP_VERSION(14, 0, 1):
adev->smuio.funcs = &smuio_v13_0_6_funcs;
break;
default:
@@ -2641,6 +2664,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(6, 0, 3):
adev->lsdma.funcs = &lsdma_v6_0_funcs;
break;
case IP_VERSION(7, 0, 0):
case IP_VERSION(7, 0, 1):
adev->lsdma.funcs = &lsdma_v7_0_funcs;
break;
default:
break;
}


@@ -211,6 +211,7 @@ int amdgpu_seamless = -1; /* auto */
uint amdgpu_debug_mask;
int amdgpu_agp = -1; /* auto */
int amdgpu_wbrf = -1;
int amdgpu_damage_clips = -1; /* auto */
static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work);
@@ -848,17 +849,30 @@ module_param_named(visualconfirm, amdgpu_dc_visual_confirm, uint, 0444);
* the ABM algorithm, with 1 being the least reduction and 4 being the most
* reduction.
*
* Defaults to 0, or disabled. Userspace can still override this level later
* after boot.
* Defaults to -1, or disabled. Userspace can only override this level after
* boot if it's set to auto.
*/
uint amdgpu_dm_abm_level;
MODULE_PARM_DESC(abmlevel, "ABM level (0 = off (default), 1-4 = backlight reduction level) ");
module_param_named(abmlevel, amdgpu_dm_abm_level, uint, 0444);
int amdgpu_dm_abm_level = -1;
MODULE_PARM_DESC(abmlevel,
"ABM level (0 = off, 1-4 = backlight reduction level, -1 auto (default))");
module_param_named(abmlevel, amdgpu_dm_abm_level, int, 0444);
int amdgpu_backlight = -1;
MODULE_PARM_DESC(backlight, "Backlight control (0 = pwm, 1 = aux, -1 auto (default))");
module_param_named(backlight, amdgpu_backlight, bint, 0444);
/**
* DOC: damageclips (int)
* Enable or disable damage clips support. If damage clips support is disabled,
* we will force full frame updates, irrespective of what user space sends to
* us.
*
* Defaults to -1 (where it is enabled unless a PSR-SU display is detected).
*/
MODULE_PARM_DESC(damageclips,
"Damage clips support (0 = disable, 1 = enable, -1 auto (default))");
module_param_named(damageclips, amdgpu_damage_clips, int, 0444);
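Since abmlevel and damageclips now share the -1 "auto" convention, here is a
tiny model of how such a tri-state parameter resolves; the helper and its
names are hypothetical, not driver code:

#include <stdio.h>

/* -1 (auto): userspace may change the level after boot;
 * 0-4: pinned at module load, later requests are ignored. */
static int effective_abm_level(int abmlevel_param, int userspace_request)
{
        if (abmlevel_param == -1)
                return userspace_request;
        return abmlevel_param;
}

int main(void)
{
        printf("auto, user asks 3     -> %d\n", effective_abm_level(-1, 3));
        printf("pinned 2, user asks 3 -> %d\n", effective_abm_level(2, 3));
        return 0;
}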
/**
* DOC: tmz (int)
* Trusted Memory Zone (TMZ) is a method to protect data being written


@@ -724,8 +724,15 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
if (adev->gfx.gfx_off_req_count == 0 &&
!adev->gfx.gfx_off_state) {
schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
/* If going to s2idle, no need to wait */
if (adev->in_s0ix) {
if (!amdgpu_dpm_set_powergating_by_smu(adev,
AMD_IP_BLOCK_TYPE_GFX, true))
adev->gfx.gfx_off_state = true;
} else {
schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
delay);
}
}
} else {
if (adev->gfx.gfx_off_req_count == 0) {


@@ -36,10 +36,35 @@ static void amdgpu_jpeg_idle_work_handler(struct work_struct *work);
int amdgpu_jpeg_sw_init(struct amdgpu_device *adev)
{
int i, r;
INIT_DELAYED_WORK(&adev->jpeg.idle_work, amdgpu_jpeg_idle_work_handler);
mutex_init(&adev->jpeg.jpeg_pg_lock);
atomic_set(&adev->jpeg.total_submission_cnt, 0);
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_JPEG_DPG))
adev->jpeg.indirect_sram = true;
for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
if (adev->jpeg.harvest_config & (1 << i))
continue;
if (adev->jpeg.indirect_sram) {
r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT,
&adev->jpeg.inst[i].dpg_sram_bo,
&adev->jpeg.inst[i].dpg_sram_gpu_addr,
&adev->jpeg.inst[i].dpg_sram_cpu_addr);
if (r) {
dev_err(adev->dev,
"JPEG %d (%d) failed to allocate DPG bo\n", i, r);
return r;
}
}
}
return 0;
}
@@ -51,6 +76,11 @@ int amdgpu_jpeg_sw_fini(struct amdgpu_device *adev)
if (adev->jpeg.harvest_config & (1 << i))
continue;
amdgpu_bo_free_kernel(
&adev->jpeg.inst[i].dpg_sram_bo,
&adev->jpeg.inst[i].dpg_sram_gpu_addr,
(void **)&adev->jpeg.inst[i].dpg_sram_cpu_addr);
for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j)
amdgpu_ring_fini(&adev->jpeg.inst[i].ring_dec[j]);
}
@@ -210,12 +240,15 @@ int amdgpu_jpeg_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
} else {
r = 0;
}
if (!amdgpu_sriov_vf(adev)) {
for (i = 0; i < adev->usec_timeout; i++) {
tmp = RREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch[ring->pipe]);
if (tmp == 0xDEADBEEF)
break;
udelay(1);
if (amdgpu_emu_mode == 1)
udelay(10);
}
if (i >= adev->usec_timeout)
@@ -296,3 +329,16 @@ int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev)
return 0;
}
int amdgpu_jpeg_psp_update_sram(struct amdgpu_device *adev, int inst_idx,
enum AMDGPU_UCODE_ID ucode_id)
{
struct amdgpu_firmware_info ucode = {
.ucode_id = AMDGPU_UCODE_ID_JPEG_RAM,
.mc_addr = adev->jpeg.inst[inst_idx].dpg_sram_gpu_addr,
.ucode_size = ((uintptr_t)adev->jpeg.inst[inst_idx].dpg_sram_curr_addr -
(uintptr_t)adev->jpeg.inst[inst_idx].dpg_sram_cpu_addr),
};
return psp_execute_ip_fw_load(&adev->psp, &ucode);
}
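A standalone model of the DPG indirect path added here, built only from what
the hunks show: register (offset, value) pairs are appended through
dpg_sram_curr_addr (see the WREG32_SOC15_JPEG_DPG_MODE macro in the header
hunk below), and the byte count handed to the PSP is current minus base, as
in amdgpu_jpeg_psp_update_sram(). The 64 * 2 dwords mirror the 64 * 2 * 4
byte buffer allocated in sw_init:

#include <stdint.h>
#include <stdio.h>

#define DPG_SRAM_DWORDS (64 * 2)       /* 64 (offset, value) pairs */

struct dpg_buf {
        uint32_t sram[DPG_SRAM_DWORDS];
        uint32_t *curr;
};

static void dpg_indirect_wreg(struct dpg_buf *b, uint32_t offset,
                              uint32_t value)
{
        *b->curr++ = offset;           /* mirrors the indirect branch of */
        *b->curr++ = value;            /* WREG32_SOC15_JPEG_DPG_MODE     */
}

int main(void)
{
        struct dpg_buf b = { .curr = b.sram };

        dpg_indirect_wreg(&b, 0x1234, 0xDEADBEEF);
        dpg_indirect_wreg(&b, 0x1238, 0x1);
        printf("ucode_size = %zu bytes\n",
               (size_t)((uintptr_t)b.curr - (uintptr_t)b.sram));
        return 0;
}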


@@ -32,6 +32,34 @@
#define AMDGPU_JPEG_HARVEST_JPEG0 (1 << 0)
#define AMDGPU_JPEG_HARVEST_JPEG1 (1 << 1)
#define WREG32_SOC15_JPEG_DPG_MODE(inst_idx, offset, value, indirect) \
do { \
if (!indirect) { \
WREG32_SOC15(JPEG, GET_INST(JPEG, inst_idx), \
mmUVD_DPG_LMA_DATA, value); \
WREG32_SOC15( \
JPEG, GET_INST(JPEG, inst_idx), \
mmUVD_DPG_LMA_CTL, \
(UVD_DPG_LMA_CTL__READ_WRITE_MASK | \
offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT | \
indirect << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT)); \
} else { \
*adev->jpeg.inst[inst_idx].dpg_sram_curr_addr++ = \
offset; \
*adev->jpeg.inst[inst_idx].dpg_sram_curr_addr++ = \
value; \
} \
} while (0)
#define RREG32_SOC15_JPEG_DPG_MODE(inst_idx, offset, mask_en) \
({ \
WREG32_SOC15(JPEG, inst_idx, mmUVD_DPG_LMA_CTL, \
(0x0 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT | \
mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT | \
offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); \
RREG32_SOC15(JPEG, inst_idx, mmUVD_DPG_LMA_DATA); \
})
struct amdgpu_jpeg_reg{
unsigned jpeg_pitch[AMDGPU_MAX_JPEG_RINGS];
};
@@ -41,6 +69,11 @@ struct amdgpu_jpeg_inst {
struct amdgpu_irq_src irq;
struct amdgpu_irq_src ras_poison_irq;
struct amdgpu_jpeg_reg external;
struct amdgpu_bo *dpg_sram_bo;
struct dpg_pause_state pause_state;
void *dpg_sram_cpu_addr;
uint64_t dpg_sram_gpu_addr;
uint32_t *dpg_sram_curr_addr;
uint8_t aid_id;
};
@@ -63,6 +96,7 @@ struct amdgpu_jpeg {
uint16_t inst_mask;
uint8_t num_inst_per_aid;
bool indirect_sram;
};
int amdgpu_jpeg_sw_init(struct amdgpu_device *adev);
@@ -82,5 +116,7 @@ int amdgpu_jpeg_process_poison_irq(struct amdgpu_device *adev,
int amdgpu_jpeg_ras_late_init(struct amdgpu_device *adev,
struct ras_common_if *ras_block);
int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev);
int amdgpu_jpeg_psp_update_sram(struct amdgpu_device *adev, int inst_idx,
enum AMDGPU_UCODE_ID ucode_id);
#endif /*__AMDGPU_JPEG_H__*/


@@ -38,6 +38,7 @@
#include "psp_v12_0.h"
#include "psp_v13_0.h"
#include "psp_v13_0_4.h"
#include "psp_v14_0.h"
#include "amdgpu_ras.h"
#include "amdgpu_securedisplay.h"
@@ -162,20 +163,26 @@ static int psp_early_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct psp_context *psp = &adev->psp;
psp->autoload_supported = true;
psp->boot_time_tmr = true;
switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
case IP_VERSION(9, 0, 0):
psp_v3_1_set_psp_funcs(psp);
psp->autoload_supported = false;
psp->boot_time_tmr = false;
break;
case IP_VERSION(10, 0, 0):
case IP_VERSION(10, 0, 1):
psp_v10_0_set_psp_funcs(psp);
psp->autoload_supported = false;
psp->boot_time_tmr = false;
break;
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 4):
psp_v11_0_set_psp_funcs(psp);
psp->autoload_supported = false;
psp->boot_time_tmr = false;
break;
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 7):
@@ -188,15 +195,20 @@ static int psp_early_init(void *handle)
case IP_VERSION(11, 0, 12):
case IP_VERSION(11, 0, 13):
psp_v11_0_set_psp_funcs(psp);
psp->autoload_supported = true;
psp->boot_time_tmr = false;
break;
case IP_VERSION(11, 0, 3):
case IP_VERSION(12, 0, 1):
psp_v12_0_set_psp_funcs(psp);
psp->autoload_supported = false;
psp->boot_time_tmr = false;
break;
case IP_VERSION(13, 0, 2):
psp->boot_time_tmr = false;
fallthrough;
case IP_VERSION(13, 0, 6):
psp_v13_0_set_psp_funcs(psp);
psp->autoload_supported = false;
break;
case IP_VERSION(13, 0, 1):
case IP_VERSION(13, 0, 3):
@@ -204,25 +216,31 @@ static int psp_early_init(void *handle)
case IP_VERSION(13, 0, 8):
case IP_VERSION(13, 0, 11):
case IP_VERSION(14, 0, 0):
case IP_VERSION(14, 0, 1):
psp_v13_0_set_psp_funcs(psp);
psp->autoload_supported = true;
psp->boot_time_tmr = false;
break;
case IP_VERSION(11, 0, 8):
if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
psp_v11_0_8_set_psp_funcs(psp);
psp->autoload_supported = false;
}
psp->autoload_supported = false;
psp->boot_time_tmr = false;
break;
case IP_VERSION(13, 0, 0):
case IP_VERSION(13, 0, 7):
case IP_VERSION(13, 0, 10):
psp_v13_0_set_psp_funcs(psp);
psp->autoload_supported = true;
adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
psp->boot_time_tmr = false;
break;
case IP_VERSION(13, 0, 4):
psp_v13_0_4_set_psp_funcs(psp);
psp->autoload_supported = true;
psp->boot_time_tmr = false;
break;
case IP_VERSION(14, 0, 2):
case IP_VERSION(14, 0, 3):
psp_v14_0_set_psp_funcs(psp);
break;
default:
return -EINVAL;
@@ -230,6 +248,8 @@ static int psp_early_init(void *handle)
psp->adev = adev;
adev->psp_timeout = 20000;
psp_check_pmfw_centralized_cstate_management(psp);
if (amdgpu_sriov_vf(adev))
@@ -627,7 +647,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
{
int ret;
int index;
int timeout = 20000;
int timeout = psp->adev->psp_timeout;
bool ras_intr = false;
bool skip_unsupport = false;
@@ -774,16 +794,6 @@ static int psp_load_toc(struct psp_context *psp,
return ret;
}
static bool psp_boottime_tmr(struct psp_context *psp)
{
switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) {
case IP_VERSION(13, 0, 6):
return true;
default:
return false;
}
}
/* Set up Trusted Memory Region */
static int psp_tmr_init(struct psp_context *psp)
{
@@ -815,7 +825,7 @@ static int psp_tmr_init(struct psp_context *psp)
}
}
if (!psp->tmr_bo) {
if (!psp->tmr_bo && !psp->boot_time_tmr) {
pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
ret = amdgpu_bo_create_kernel(psp->adev, tmr_size,
PSP_TMR_ALIGNMENT,
@@ -2251,7 +2261,7 @@ static int psp_hw_start(struct psp_context *psp)
if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
goto skip_pin_bo;
if (!psp_boottime_tmr(psp)) {
if (!psp->boot_time_tmr || psp->autoload_supported) {
ret = psp_tmr_init(psp);
if (ret) {
dev_err(adev->dev, "PSP tmr init failed!\n");
@@ -2271,10 +2281,12 @@ skip_pin_bo:
return ret;
}
ret = psp_tmr_load(psp);
if (ret) {
dev_err(adev->dev, "PSP load tmr failed!\n");
return ret;
if (!psp->boot_time_tmr || !psp->autoload_supported) {
ret = psp_tmr_load(psp);
if (ret) {
dev_err(adev->dev, "PSP load tmr failed!\n");
return ret;
}
}
return 0;
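As I read the two conditions above: tmr_init now runs unless the ASIC has a
boot-time TMR and no autoload, and tmr_load runs unless it has a boot-time
TMR with autoload. A small userspace sketch that prints the decision table:

#include <stdbool.h>
#include <stdio.h>

static void tmr_paths(bool boot_time_tmr, bool autoload_supported)
{
        bool do_init = !boot_time_tmr || autoload_supported;
        bool do_load = !boot_time_tmr || !autoload_supported;

        printf("boot_time_tmr=%d autoload=%d -> tmr_init=%d tmr_load=%d\n",
               boot_time_tmr, autoload_supported, do_init, do_load);
}

int main(void)
{
        tmr_paths(false, true);   /* classic runtime TMR */
        tmr_paths(true, false);   /* e.g. PSP 13.0.6 per the hunk above */
        tmr_paths(true, true);
        return 0;
}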
@@ -2485,6 +2497,9 @@ static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
case AMDGPU_UCODE_ID_P2S_TABLE:
*type = GFX_FW_TYPE_P2S_TABLE;
break;
case AMDGPU_UCODE_ID_JPEG_RAM:
*type = GFX_FW_TYPE_JPEG_RAM;
break;
case AMDGPU_UCODE_ID_MAXIMUM:
default:
return -EINVAL;
@@ -3956,3 +3971,11 @@ const struct amdgpu_ip_block_version psp_v13_0_4_ip_block = {
.rev = 4,
.funcs = &psp_ip_funcs,
};
const struct amdgpu_ip_block_version psp_v14_0_ip_block = {
.type = AMD_IP_BLOCK_TYPE_PSP,
.major = 14,
.minor = 0,
.rev = 0,
.funcs = &psp_ip_funcs,
};


@@ -203,7 +203,7 @@ struct psp_ras_context {
#define GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES 0x1000
#define GDDR6_MEM_TRAINING_OFFSET 0x8000
/*Define the VRAM size that will be encroached by BIST training.*/
#define GDDR6_MEM_TRAINING_ENCROACHED_SIZE 0x2000000
#define BIST_MEM_TRAINING_ENCROACHED_SIZE 0x2000000
enum psp_memory_training_init_flag {
PSP_MEM_TRAIN_NOT_SUPPORT = 0x0,
@@ -364,6 +364,8 @@ struct psp_context {
atomic_t fence_value;
/* flag to mark whether gfx fw autoload is supported or not */
bool autoload_supported;
/* flag to mark whether psp use runtime TMR or boottime TMR */
bool boot_time_tmr;
/* flag to mark whether df cstate management centralized to PMFW */
bool pmfw_centralized_cstate_management;
@@ -463,6 +465,7 @@ extern const struct amdgpu_ip_block_version psp_v11_0_8_ip_block;
extern const struct amdgpu_ip_block_version psp_v12_0_ip_block;
extern const struct amdgpu_ip_block_version psp_v13_0_ip_block;
extern const struct amdgpu_ip_block_version psp_v13_0_4_ip_block;
extern const struct amdgpu_ip_block_version psp_v14_0_ip_block;
extern int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
uint32_t field_val, uint32_t mask, bool check_changed);


@@ -735,6 +735,9 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
control->tbl_rai.rma_status = GPU_RETIRED__ECC_REACH_THRESHOLD;
control->tbl_rai.health_percent = 0;
}
/* ignore the -ENOTSUPP return value */
amdgpu_dpm_send_rma_reason(adev);
}
if (control->tbl_hdr.version == RAS_TABLE_VER_V2_1)


@@ -45,11 +45,7 @@
*/
static inline u64 amdgpu_seq64_get_va_base(struct amdgpu_device *adev)
{
u64 addr = adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT;
addr -= AMDGPU_VA_RESERVED_TOP;
return addr;
return AMDGPU_VA_RESERVED_SEQ64_START(adev);
}
/**


@@ -556,6 +556,8 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
default:
if (!load_type)
return AMDGPU_FW_LOAD_DIRECT;
else if (load_type == 3)
return AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO;
else
return AMDGPU_FW_LOAD_PSP;
}
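The default branch above, restated standalone; the enum names are
illustrative stand-ins for the real AMDGPU_FW_LOAD_* values, and the
per-ASIC cases in amdgpu_ucode_get_load_type() take precedence over this
mapping:

#include <stdio.h>

enum fw_load { FW_LOAD_DIRECT, FW_LOAD_PSP, FW_LOAD_RLC_BACKDOOR_AUTO };

static enum fw_load default_load_type(int load_type)
{
        if (!load_type)
                return FW_LOAD_DIRECT;            /* fw_load_type=0 */
        else if (load_type == 3)
                return FW_LOAD_RLC_BACKDOOR_AUTO; /* new: fw_load_type=3 */
        return FW_LOAD_PSP;
}

int main(void)
{
        printf("%d %d %d\n", default_load_type(0), default_load_type(3),
               default_load_type(1));
        return 0;
}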
@@ -678,6 +680,8 @@ const char *amdgpu_ucode_name(enum AMDGPU_UCODE_ID ucode_id)
return "UMSCH_MM_DATA";
case AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER:
return "UMSCH_MM_CMD_BUFFER";
case AMDGPU_UCODE_ID_JPEG_RAM:
return "JPEG";
default:
return "UNKNOWN UCODE";
}
@@ -1060,7 +1064,8 @@ static int amdgpu_ucode_patch_jt(struct amdgpu_firmware_info *ucode,
int amdgpu_ucode_create_bo(struct amdgpu_device *adev)
{
if (adev->firmware.load_type != AMDGPU_FW_LOAD_DIRECT) {
if ((adev->firmware.load_type != AMDGPU_FW_LOAD_DIRECT) &&
(adev->firmware.load_type != AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)) {
amdgpu_bo_create_kernel(adev, adev->firmware.fw_size, PAGE_SIZE,
(amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ?
AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,


@@ -511,6 +511,7 @@ enum AMDGPU_UCODE_ID {
AMDGPU_UCODE_ID_UMSCH_MM_DATA,
AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER,
AMDGPU_UCODE_ID_P2S_TABLE,
AMDGPU_UCODE_ID_JPEG_RAM,
AMDGPU_UCODE_ID_MAXIMUM,
};


@@ -59,6 +59,7 @@
#define FIRMWARE_VCN4_0_3 "amdgpu/vcn_4_0_3.bin"
#define FIRMWARE_VCN4_0_4 "amdgpu/vcn_4_0_4.bin"
#define FIRMWARE_VCN4_0_5 "amdgpu/vcn_4_0_5.bin"
#define FIRMWARE_VCN5_0_0 "amdgpu/vcn_5_0_0.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN);
MODULE_FIRMWARE(FIRMWARE_PICASSO);
@@ -82,6 +83,7 @@ MODULE_FIRMWARE(FIRMWARE_VCN4_0_2);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_3);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_4);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_5);
MODULE_FIRMWARE(FIRMWARE_VCN5_0_0);
static void amdgpu_vcn_idle_work_handler(struct work_struct *work);


@@ -160,6 +160,48 @@
} \
} while (0)
#define SOC24_DPG_MODE_OFFSET(ip, inst_idx, reg) \
({ \
uint32_t internal_reg_offset, addr; \
bool video_range, aon_range; \
\
addr = (adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg); \
addr <<= 2; \
video_range = ((((0xFFFFF & addr) >= (VCN_VID_SOC_ADDRESS)) && \
((0xFFFFF & addr) < ((VCN_VID_SOC_ADDRESS + 0x2600))))); \
aon_range = ((((0xFFFFF & addr) >= (VCN_AON_SOC_ADDRESS)) && \
((0xFFFFF & addr) < ((VCN_AON_SOC_ADDRESS + 0x600))))); \
if (video_range) \
internal_reg_offset = ((0xFFFFF & addr) - (VCN_VID_SOC_ADDRESS) + \
(VCN_VID_IP_ADDRESS)); \
else if (aon_range) \
internal_reg_offset = ((0xFFFFF & addr) - (VCN_AON_SOC_ADDRESS) + \
(VCN_AON_IP_ADDRESS)); \
else \
internal_reg_offset = (0xFFFFF & addr); \
\
internal_reg_offset >>= 2; \
})
#define WREG32_SOC24_DPG_MODE(inst_idx, offset, value, mask_en, indirect) \
do { \
if (!indirect) { \
WREG32_SOC15(VCN, GET_INST(VCN, inst_idx), \
regUVD_DPG_LMA_DATA, value); \
WREG32_SOC15( \
VCN, GET_INST(VCN, inst_idx), \
regUVD_DPG_LMA_CTL, \
(0x1 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT | \
mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT | \
offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); \
} else { \
*adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = \
offset; \
*adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = \
value; \
} \
} while (0)
#define AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE (1 << 2)
#define AMDGPU_FW_SHARED_FLAG_0_DRM_KEY_INJECT (1 << 4)
#define AMDGPU_VCN_FW_SHARED_FLAG_0_RB (1 << 6)


@@ -135,11 +135,20 @@ struct amdgpu_mem_stats;
#define AMDGPU_IS_MMHUB0(x) ((x) >= AMDGPU_MMHUB0_START && (x) < AMDGPU_MMHUB1_START)
#define AMDGPU_IS_MMHUB1(x) ((x) >= AMDGPU_MMHUB1_START && (x) < AMDGPU_MAX_VMHUBS)
/* Reserve 2MB at top/bottom of address space for kernel use */
/* Reserve space at top/bottom of address space for kernel use */
#define AMDGPU_VA_RESERVED_CSA_SIZE (2ULL << 20)
#define AMDGPU_VA_RESERVED_CSA_START(adev) (((adev)->vm_manager.max_pfn \
<< AMDGPU_GPU_PAGE_SHIFT) \
- AMDGPU_VA_RESERVED_CSA_SIZE)
#define AMDGPU_VA_RESERVED_SEQ64_SIZE (2ULL << 20)
#define AMDGPU_VA_RESERVED_BOTTOM (2ULL << 20)
#define AMDGPU_VA_RESERVED_TOP (AMDGPU_VA_RESERVED_SEQ64_SIZE + \
#define AMDGPU_VA_RESERVED_SEQ64_START(adev) (AMDGPU_VA_RESERVED_CSA_START(adev) \
- AMDGPU_VA_RESERVED_SEQ64_SIZE)
#define AMDGPU_VA_RESERVED_TRAP_SIZE (2ULL << 12)
#define AMDGPU_VA_RESERVED_TRAP_START(adev) (AMDGPU_VA_RESERVED_SEQ64_START(adev) \
- AMDGPU_VA_RESERVED_TRAP_SIZE)
#define AMDGPU_VA_RESERVED_BOTTOM (1ULL << 16)
#define AMDGPU_VA_RESERVED_TOP (AMDGPU_VA_RESERVED_TRAP_SIZE + \
AMDGPU_VA_RESERVED_SEQ64_SIZE + \
AMDGPU_VA_RESERVED_CSA_SIZE)
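A worked example of the new top-of-VA carve-outs, assuming a hypothetical
device whose VA space is 48 bits (max_pfn << AMDGPU_GPU_PAGE_SHIFT ==
1ULL << 48); the constants mirror the macros above, with the device pointer
replaced by a plain top-of-VA value:

#include <stdint.h>
#include <stdio.h>

#define VA_TOP     (1ULL << 48)
#define CSA_SIZE   (2ULL << 20)       /* 2 MB */
#define SEQ64_SIZE (2ULL << 20)       /* 2 MB */
#define TRAP_SIZE  (2ULL << 12)       /* 8 KB */

int main(void)
{
        uint64_t csa   = VA_TOP - CSA_SIZE;
        uint64_t seq64 = csa - SEQ64_SIZE;
        uint64_t trap  = seq64 - TRAP_SIZE;

        printf("CSA   start: 0x%012llx\n", (unsigned long long)csa);
        printf("SEQ64 start: 0x%012llx\n", (unsigned long long)seq64);
        printf("TRAP  start: 0x%012llx\n", (unsigned long long)trap);
        printf("reserved at top: 0x%llx bytes\n",
               (unsigned long long)(CSA_SIZE + SEQ64_SIZE + TRAP_SIZE));
        return 0;
}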
/* See vm_update_mode */


@@ -0,0 +1,122 @@
/*
* Copyright 2023 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu.h"
#include "athub_v4_1_0.h"
#include "athub/athub_4_1_0_offset.h"
#include "athub/athub_4_1_0_sh_mask.h"
#include "soc15_common.h"
static uint32_t athub_v4_1_0_get_cg_cntl(struct amdgpu_device *adev)
{
uint32_t data;
switch (amdgpu_ip_version(adev, ATHUB_HWIP, 0)) {
case IP_VERSION(4, 1, 0):
data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL);
break;
default:
data = 0;
break;
}
return data;
}
static void athub_v4_1_0_set_cg_cntl(struct amdgpu_device *adev, uint32_t data)
{
switch (amdgpu_ip_version(adev, ATHUB_HWIP, 0)) {
case IP_VERSION(4, 1, 0):
WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL, data);
break;
default:
break;
}
}
static void
athub_v4_1_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
bool enable)
{
uint32_t def, data;
def = data = athub_v4_1_0_get_cg_cntl(adev);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ATHUB_MGCG))
data |= ATHUB_MISC_CNTL__CG_ENABLE_MASK;
else
data &= ~ATHUB_MISC_CNTL__CG_ENABLE_MASK;
if (def != data)
athub_v4_1_0_set_cg_cntl(adev, data);
}
static void
athub_v4_1_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
bool enable)
{
uint32_t def, data;
def = data = athub_v4_1_0_get_cg_cntl(adev);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ATHUB_LS))
data |= ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
else
data &= ~ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
if (def != data)
athub_v4_1_0_set_cg_cntl(adev, data);
}
int athub_v4_1_0_set_clockgating(struct amdgpu_device *adev,
enum amd_clockgating_state state)
{
if (amdgpu_sriov_vf(adev))
return 0;
switch (amdgpu_ip_version(adev, ATHUB_HWIP, 0)) {
case IP_VERSION(4, 1, 0):
athub_v4_1_0_update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
athub_v4_1_0_update_medium_grain_light_sleep(adev,
state == AMD_CG_STATE_GATE);
break;
default:
break;
}
return 0;
}
void athub_v4_1_0_get_clockgating(struct amdgpu_device *adev, u64 *flags)
{
int data;
/* AMD_CG_SUPPORT_ATHUB_MGCG */
data = athub_v4_1_0_get_cg_cntl(adev);
if (data & ATHUB_MISC_CNTL__CG_ENABLE_MASK)
*flags |= AMD_CG_SUPPORT_ATHUB_MGCG;
/* AMD_CG_SUPPORT_ATHUB_LS */
if (data & ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK)
*flags |= AMD_CG_SUPPORT_ATHUB_LS;
}


@@ -0,0 +1,30 @@
/*
* Copyright 2023 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef __ATHUB_V4_1_0_H__
#define __ATHUB_V4_1_0_H__
int athub_v4_1_0_set_clockgating(struct amdgpu_device *adev,
enum amd_clockgating_state state);
void athub_v4_1_0_get_clockgating(struct amdgpu_device *adev, u64 *flags);
#endif


@@ -571,6 +571,7 @@ static void gmc_v11_0_set_mmhub_funcs(struct amdgpu_device *adev)
adev->mmhub.funcs = &mmhub_v3_0_2_funcs;
break;
case IP_VERSION(3, 3, 0):
case IP_VERSION(3, 3, 1):
adev->mmhub.funcs = &mmhub_v3_3_funcs;
break;
default:


@@ -0,0 +1,142 @@
/*
* Copyright 2023 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "hdp_v7_0.h"
#include "hdp/hdp_7_0_0_offset.h"
#include "hdp/hdp_7_0_0_sh_mask.h"
#include <uapi/linux/kfd_ioctl.h>
static void hdp_v7_0_flush_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
if (!ring || !ring->funcs->emit_wreg)
WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
else
amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}
static void hdp_v7_0_update_clock_gating(struct amdgpu_device *adev,
bool enable)
{
uint32_t hdp_clk_cntl, hdp_clk_cntl1;
uint32_t hdp_mem_pwr_cntl;
if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_HDP_DS |
AMD_CG_SUPPORT_HDP_SD)))
return;
hdp_clk_cntl = hdp_clk_cntl1 = RREG32_SOC15(HDP, 0, regHDP_CLK_CNTL);
hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL);
/* Before doing clock/power mode switch,
* force on IPH & RC clock */
hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
RC_MEM_CLK_SOFT_OVERRIDE, 1);
WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl);
/* disable clock and power gating before any changing */
hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
ATOMIC_MEM_POWER_CTRL_EN, 0);
hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
ATOMIC_MEM_POWER_LS_EN, 0);
hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
ATOMIC_MEM_POWER_DS_EN, 0);
hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
ATOMIC_MEM_POWER_SD_EN, 0);
hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
RC_MEM_POWER_CTRL_EN, 0);
hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
RC_MEM_POWER_LS_EN, 0);
hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
RC_MEM_POWER_DS_EN, 0);
hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
RC_MEM_POWER_SD_EN, 0);
WREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
/* Already disabled above. The actions below are for "enabled" only */
if (enable) {
/* only one clock gating mode (LS/DS/SD) can be enabled */
if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
HDP_MEM_POWER_CTRL,
ATOMIC_MEM_POWER_SD_EN, 1);
hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
HDP_MEM_POWER_CTRL,
RC_MEM_POWER_SD_EN, 1);
} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
HDP_MEM_POWER_CTRL,
ATOMIC_MEM_POWER_LS_EN, 1);
hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
HDP_MEM_POWER_CTRL,
RC_MEM_POWER_LS_EN, 1);
} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
HDP_MEM_POWER_CTRL,
ATOMIC_MEM_POWER_DS_EN, 1);
hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
HDP_MEM_POWER_CTRL,
RC_MEM_POWER_DS_EN, 1);
}
/* confirmed that IPH_MEM_POWER_CTRL_EN and RC_MEM_POWER_CTRL_EN have to
* be set for SRAM LS/DS/SD */
if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_DS |
AMD_CG_SUPPORT_HDP_SD)) {
hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
ATOMIC_MEM_POWER_CTRL_EN, 1);
hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
RC_MEM_POWER_CTRL_EN, 1);
WREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
}
}
/* disable IPH & RC clock override after clock/power mode changing */
hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
RC_MEM_CLK_SOFT_OVERRIDE, 0);
WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl);
}
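The if/else-if chain above makes the three gating modes mutually exclusive,
with SD taking priority over LS and LS over DS. A compact model of that
selection (the flag bits are illustrative, not the real AMD_CG_* values):

#include <stdio.h>

#define CG_HDP_LS (1u << 0)
#define CG_HDP_DS (1u << 1)
#define CG_HDP_SD (1u << 2)

static const char *hdp_gating_mode(unsigned int cg_flags)
{
        if (cg_flags & CG_HDP_SD)
                return "SD";
        else if (cg_flags & CG_HDP_LS)
                return "LS";
        else if (cg_flags & CG_HDP_DS)
                return "DS";
        return "none";
}

int main(void)
{
        /* LS wins over DS when both are advertised */
        printf("%s\n", hdp_gating_mode(CG_HDP_LS | CG_HDP_DS));
        return 0;
}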
static void hdp_v7_0_get_clockgating_state(struct amdgpu_device *adev,
u64 *flags)
{
uint32_t tmp;
/* AMD_CG_SUPPORT_HDP_LS/DS/SD */
tmp = RREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL);
if (tmp & HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_LS_EN_MASK)
*flags |= AMD_CG_SUPPORT_HDP_LS;
else if (tmp & HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_DS_EN_MASK)
*flags |= AMD_CG_SUPPORT_HDP_DS;
else if (tmp & HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_SD_EN_MASK)
*flags |= AMD_CG_SUPPORT_HDP_SD;
}
const struct amdgpu_hdp_funcs hdp_v7_0_funcs = {
.flush_hdp = hdp_v7_0_flush_hdp,
.update_clock_gating = hdp_v7_0_update_clock_gating,
.get_clock_gating_state = hdp_v7_0_get_clockgating_state,
};


@@ -0,0 +1,31 @@
/*
* Copyright 2023 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef __HDP_V7_0_H__
#define __HDP_V7_0_H__
#include "soc15_common.h"
extern const struct amdgpu_hdp_funcs hdp_v7_0_funcs;
#endif


@@ -0,0 +1,767 @@
/*
* Copyright 2023 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/pci.h>
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "oss/osssys_7_0_0_offset.h"
#include "oss/osssys_7_0_0_sh_mask.h"
#include "soc15_common.h"
#include "ih_v7_0.h"
#define MAX_REARM_RETRY 10
static void ih_v7_0_set_interrupt_funcs(struct amdgpu_device *adev);
/**
* ih_v7_0_init_register_offset - Initialize register offset for ih rings
*
* @adev: amdgpu_device pointer
*
* Initialize the register offsets for the ih rings (IH_V7_0).
*/
static void ih_v7_0_init_register_offset(struct amdgpu_device *adev)
{
struct amdgpu_ih_regs *ih_regs;
/* ih ring 2 is removed
* ih ring and ih ring 1 are available */
if (adev->irq.ih.ring_size) {
ih_regs = &adev->irq.ih.ih_regs;
ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_BASE);
ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_BASE_HI);
ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_CNTL);
ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_WPTR);
ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_RPTR);
ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_DOORBELL_RPTR);
ih_regs->ih_rb_wptr_addr_lo = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_WPTR_ADDR_LO);
ih_regs->ih_rb_wptr_addr_hi = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_WPTR_ADDR_HI);
ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL;
}
if (adev->irq.ih1.ring_size) {
ih_regs = &adev->irq.ih1.ih_regs;
ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_BASE_RING1);
ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_BASE_HI_RING1);
ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_CNTL_RING1);
ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_WPTR_RING1);
ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_RPTR_RING1);
ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_DOORBELL_RPTR_RING1);
ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING1;
}
}
/**
* force_update_wptr_for_self_int - Force update the wptr for self interrupt
*
* @adev: amdgpu_device pointer
* @threshold: threshold to trigger the wptr reporting
* @timeout: timeout to trigger the wptr reporting
* @enabled: Enable/disable timeout flush mechanism
*
* threshold input range: 0 ~ 15, default 0,
* real_threshold = 2^threshold
* timeout input range: 0 ~ 20, default 8,
* real_timeout = (2^timeout) * 1024 / (socclk_freq)
*
* Force update wptr for self interrupt ( >= SIENNA_CICHLID).
*/
static void
force_update_wptr_for_self_int(struct amdgpu_device *adev,
u32 threshold, u32 timeout, bool enabled)
{
u32 ih_cntl, ih_rb_cntl;
ih_cntl = RREG32_SOC15(OSSSYS, 0, regIH_CNTL2);
ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, regIH_RB_CNTL_RING1);
ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL2,
SELF_IV_FORCE_WPTR_UPDATE_TIMEOUT, timeout);
ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL2,
SELF_IV_FORCE_WPTR_UPDATE_ENABLE, enabled);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
RB_USED_INT_THRESHOLD, threshold);
if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1, ih_rb_cntl))
return;
} else {
WREG32_SOC15(OSSSYS, 0, regIH_RB_CNTL_RING1, ih_rb_cntl);
}
WREG32_SOC15(OSSSYS, 0, regIH_CNTL2, ih_cntl);
}
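Plugging the defaults used at init time (threshold = 0, timeout = 8) into
the formulas from the comment above, with an assumed 1 GHz SOCCLK:

#include <stdio.h>

int main(void)
{
        unsigned int threshold = 0, timeout = 8;
        double socclk_hz = 1.0e9;      /* assumption, not a queried value */

        printf("real_threshold = %u entries\n", 1u << threshold);
        printf("real_timeout   = %.1f us\n",
               (double)(1u << timeout) * 1024.0 / socclk_hz * 1e6);
        return 0;
}

With these inputs the forced wptr update fires after a single entry or
roughly 262 us, whichever comes first.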
/**
* ih_v7_0_toggle_ring_interrupts - toggle the interrupt ring buffer
*
* @adev: amdgpu_device pointer
* @ih: amdgpu_ih_ring pointer
* @enable: true - enable the interrupts, false - disable the interrupts
*
* Toggle the interrupt ring buffer (IH_V7_0)
*/
static int ih_v7_0_toggle_ring_interrupts(struct amdgpu_device *adev,
struct amdgpu_ih_ring *ih,
bool enable)
{
struct amdgpu_ih_regs *ih_regs;
uint32_t tmp;
ih_regs = &ih->ih_regs;
tmp = RREG32(ih_regs->ih_rb_cntl);
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0));
/* enable_intr field is only valid in ring0 */
if (ih == &adev->irq.ih)
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));
if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp))
return -ETIMEDOUT;
} else {
WREG32(ih_regs->ih_rb_cntl, tmp);
}
if (enable) {
ih->enabled = true;
} else {
/* set rptr, wptr to 0 */
WREG32(ih_regs->ih_rb_rptr, 0);
WREG32(ih_regs->ih_rb_wptr, 0);
ih->enabled = false;
ih->rptr = 0;
}
return 0;
}
/**
* ih_v7_0_toggle_interrupts - Toggle all the available interrupt ring buffers
*
* @adev: amdgpu_device pointer
* @enable: enable or disable interrupt ring buffers
*
* Toggle all the available interrupt ring buffers (IH_V7_0).
*/
static int ih_v7_0_toggle_interrupts(struct amdgpu_device *adev, bool enable)
{
struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1};
int i;
int r;
for (i = 0; i < ARRAY_SIZE(ih); i++) {
if (ih[i]->ring_size) {
r = ih_v7_0_toggle_ring_interrupts(adev, ih[i], enable);
if (r)
return r;
}
}
return 0;
}
static uint32_t ih_v7_0_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl)
{
int rb_bufsz = order_base_2(ih->ring_size / 4);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
MC_SPACE, ih->use_bus_addr ? 2 : 4);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
WPTR_OVERFLOW_CLEAR, 1);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
WPTR_OVERFLOW_ENABLE, 1);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz);
/* Ring Buffer write pointer writeback. If enabled, IH_RB_WPTR register
* value is written to memory
*/
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
WPTR_WRITEBACK_ENABLE, 1);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SNOOP, 1);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_RO, 0);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_VMID, 0);
return ih_rb_cntl;
}
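Worked example: the 256 KB ring allocated in sw_init below gives
rb_bufsz = order_base_2(256 * 1024 / 4) = 16 for IH_RB_CNTL.RB_SIZE;
order_base_2 is reimplemented here for illustration:

#include <stdio.h>

static unsigned int order_base_2(unsigned long n) /* ceil(log2(n)), n >= 1 */
{
        unsigned int o = 0;

        while ((1UL << o) < n)
                o++;
        return o;
}

int main(void)
{
        unsigned long ring_size = 256 * 1024;      /* bytes */

        printf("RB_SIZE = %u\n", order_base_2(ring_size / 4));
        return 0;
}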
static uint32_t ih_v7_0_doorbell_rptr(struct amdgpu_ih_ring *ih)
{
u32 ih_doorbell_rtpr = 0;
if (ih->use_doorbell) {
ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
IH_DOORBELL_RPTR, OFFSET,
ih->doorbell_index);
ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
IH_DOORBELL_RPTR,
ENABLE, 1);
} else {
ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
IH_DOORBELL_RPTR,
ENABLE, 0);
}
return ih_doorbell_rtpr;
}
/**
* ih_v7_0_enable_ring - enable an ih ring buffer
*
* @adev: amdgpu_device pointer
* @ih: amdgpu_ih_ring pointer
*
* Enable an ih ring buffer (IH_V7_0)
*/
static int ih_v7_0_enable_ring(struct amdgpu_device *adev,
struct amdgpu_ih_ring *ih)
{
struct amdgpu_ih_regs *ih_regs;
uint32_t tmp;
ih_regs = &ih->ih_regs;
/* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer */
WREG32(ih_regs->ih_rb_base, ih->gpu_addr >> 8);
WREG32(ih_regs->ih_rb_base_hi, (ih->gpu_addr >> 40) & 0xff);
tmp = RREG32(ih_regs->ih_rb_cntl);
tmp = ih_v7_0_rb_cntl(ih, tmp);
if (ih == &adev->irq.ih)
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled);
if (ih == &adev->irq.ih1) {
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0);
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1);
}
if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
return -ETIMEDOUT;
}
} else {
WREG32(ih_regs->ih_rb_cntl, tmp);
}
if (ih == &adev->irq.ih) {
/* set the ih ring 0 writeback address whether it's enabled or not */
WREG32(ih_regs->ih_rb_wptr_addr_lo, lower_32_bits(ih->wptr_addr));
WREG32(ih_regs->ih_rb_wptr_addr_hi, upper_32_bits(ih->wptr_addr) & 0xFFFF);
}
/* set rptr, wptr to 0 */
WREG32(ih_regs->ih_rb_wptr, 0);
WREG32(ih_regs->ih_rb_rptr, 0);
WREG32(ih_regs->ih_doorbell_rptr, ih_v7_0_doorbell_rptr(ih));
return 0;
}
/**
* ih_v7_0_irq_init - init and enable the interrupt ring
*
* @adev: amdgpu_device pointer
*
* Allocate a ring buffer for the interrupt controller,
* enable the RLC, disable interrupts, enable the IH
* ring buffer and enable it.
* Called at device load and resume.
* Returns 0 for success, errors for failure.
*/
static int ih_v7_0_irq_init(struct amdgpu_device *adev)
{
struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1};
u32 ih_chicken;
u32 tmp;
int ret;
int i;
/* disable irqs */
ret = ih_v7_0_toggle_interrupts(adev, false);
if (ret)
return ret;
adev->nbio.funcs->ih_control(adev);
if (unlikely((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) ||
(adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO))) {
if (ih[0]->use_bus_addr) {
ih_chicken = RREG32_SOC15(OSSSYS, 0, regIH_CHICKEN);
ih_chicken = REG_SET_FIELD(ih_chicken,
IH_CHICKEN, MC_SPACE_GPA_ENABLE, 1);
WREG32_SOC15(OSSSYS, 0, regIH_CHICKEN, ih_chicken);
}
}
for (i = 0; i < ARRAY_SIZE(ih); i++) {
if (ih[i]->ring_size) {
ret = ih_v7_0_enable_ring(adev, ih[i]);
if (ret)
return ret;
}
}
/* update doorbell range for ih ring 0 */
adev->nbio.funcs->ih_doorbell_range(adev, ih[0]->use_doorbell,
ih[0]->doorbell_index);
tmp = RREG32_SOC15(OSSSYS, 0, regIH_STORM_CLIENT_LIST_CNTL);
tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
CLIENT18_IS_STORM_CLIENT, 1);
WREG32_SOC15(OSSSYS, 0, regIH_STORM_CLIENT_LIST_CNTL, tmp);
tmp = RREG32_SOC15(OSSSYS, 0, regIH_INT_FLOOD_CNTL);
tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1);
WREG32_SOC15(OSSSYS, 0, regIH_INT_FLOOD_CNTL, tmp);
/* GC/MMHUB UTCL2 page fault interrupts are configured as
* MSI storm capable interrupts by default. The delay is
* used to avoid the ISR being called too frequently
* when page faults happen on several continuous pages,
* and thus avoid an MSI storm */
tmp = RREG32_SOC15(OSSSYS, 0, regIH_MSI_STORM_CTRL);
tmp = REG_SET_FIELD(tmp, IH_MSI_STORM_CTRL,
DELAY, 3);
WREG32_SOC15(OSSSYS, 0, regIH_MSI_STORM_CTRL, tmp);
pci_set_master(adev->pdev);
/* enable interrupts */
ret = ih_v7_0_toggle_interrupts(adev, true);
if (ret)
return ret;
/* enable wptr force update for self int */
force_update_wptr_for_self_int(adev, 0, 8, true);
if (adev->irq.ih_soft.ring_size)
adev->irq.ih_soft.enabled = true;
return 0;
}
/**
* ih_v7_0_irq_disable - disable interrupts
*
* @adev: amdgpu_device pointer
*
* Disable interrupts on the hw.
*/
static void ih_v7_0_irq_disable(struct amdgpu_device *adev)
{
force_update_wptr_for_self_int(adev, 0, 8, false);
ih_v7_0_toggle_interrupts(adev, false);
/* Wait and acknowledge irq */
mdelay(1);
}
/**
* ih_v7_0_get_wptr() - get the IH ring buffer wptr
*
* @adev: amdgpu_device pointer
* @ih: IH ring buffer to fetch wptr
*
* Get the IH ring buffer wptr from either the register
* or the writeback memory buffer. Also check for
* ring buffer overflow and deal with it.
* Returns the value of the wptr.
*/
static u32 ih_v7_0_get_wptr(struct amdgpu_device *adev,
struct amdgpu_ih_ring *ih)
{
u32 wptr, tmp;
struct amdgpu_ih_regs *ih_regs;
wptr = le32_to_cpu(*ih->wptr_cpu);
ih_regs = &ih->ih_regs;
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr);
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
/* When a ring buffer overflow happens, start parsing interrupts
* from the last not overwritten vector (wptr + 32). Hopefully
* this should allow us to catch up.
*/
tmp = (wptr + 32) & ih->ptr_mask;
dev_warn(adev->dev, "IH ring buffer overflow "
"(0x%08X, 0x%08X, 0x%08X)\n",
wptr, ih->rptr, tmp);
ih->rptr = tmp;
tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
out:
return (wptr & ih->ptr_mask);
}
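The overflow recovery above resumes parsing 32 bytes (one IV entry) past the
overflowed wptr, wrapped by ptr_mask. A worked example, assuming byte-based
ring pointers with ptr_mask = ring_size - 1 as amdgpu_ih_ring_init() sets up:

#include <stdio.h>

int main(void)
{
        unsigned int ring_size = 256 * 1024;
        unsigned int ptr_mask = ring_size - 1;
        unsigned int wptr = ring_size - 16;        /* just before the wrap */

        /* resumes at 0x10: the skip wraps around the ring */
        printf("resume rptr = 0x%x\n", (wptr + 32) & ptr_mask);
        return 0;
}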
/**
* ih_v7_0_irq_rearm - rearm IRQ if lost
*
* @adev: amdgpu_device pointer
* @ih: IH ring to match
*
*/
static void ih_v7_0_irq_rearm(struct amdgpu_device *adev,
struct amdgpu_ih_ring *ih)
{
uint32_t v = 0;
uint32_t i = 0;
struct amdgpu_ih_regs *ih_regs;
ih_regs = &ih->ih_regs;
/* Rearm IRQ / re-write doorbell if doorbell write is lost */
for (i = 0; i < MAX_REARM_RETRY; i++) {
v = RREG32_NO_KIQ(ih_regs->ih_rb_rptr);
if ((v < ih->ring_size) && (v != ih->rptr))
WDOORBELL32(ih->doorbell_index, ih->rptr);
else
break;
}
}
/**
* ih_v7_0_set_rptr - set the IH ring buffer rptr
*
* @adev: amdgpu_device pointer
* @ih: IH ring buffer to set rptr
*/
static void ih_v7_0_set_rptr(struct amdgpu_device *adev,
struct amdgpu_ih_ring *ih)
{
struct amdgpu_ih_regs *ih_regs;
if (ih->use_doorbell) {
/* XXX check if swapping is necessary on BE */
*ih->rptr_cpu = ih->rptr;
WDOORBELL32(ih->doorbell_index, ih->rptr);
if (amdgpu_sriov_vf(adev))
ih_v7_0_irq_rearm(adev, ih);
} else {
ih_regs = &ih->ih_regs;
WREG32(ih_regs->ih_rb_rptr, ih->rptr);
}
}
/**
* ih_v7_0_self_irq - dispatch work for ring 1
*
* @adev: amdgpu_device pointer
* @source: irq source
* @entry: IV with WPTR update
*
* Update the WPTR from the IV and schedule work to handle the entries.
*/
static int ih_v7_0_self_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
uint32_t wptr = cpu_to_le32(entry->src_data[0]);
switch (entry->ring_id) {
case 1:
*adev->irq.ih1.wptr_cpu = wptr;
schedule_work(&adev->irq.ih1_work);
break;
default: break;
}
return 0;
}
static const struct amdgpu_irq_src_funcs ih_v7_0_self_irq_funcs = {
.process = ih_v7_0_self_irq,
};
static void ih_v7_0_set_self_irq_funcs(struct amdgpu_device *adev)
{
adev->irq.self_irq.num_types = 0;
adev->irq.self_irq.funcs = &ih_v7_0_self_irq_funcs;
}
static int ih_v7_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
ih_v7_0_set_interrupt_funcs(adev);
ih_v7_0_set_self_irq_funcs(adev);
return 0;
}
static int ih_v7_0_sw_init(void *handle)
{
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool use_bus_addr;
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_IH, 0,
&adev->irq.self_irq);
if (r)
return r;
/* use gpu virtual address for ih ring
* until ih_chicken is programmed to allow
* use of bus address for ih ring by psp bl */
use_bus_addr =
(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) ? false : true;
r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, use_bus_addr);
if (r)
return r;
adev->irq.ih.use_doorbell = true;
adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;
adev->irq.ih1.ring_size = 0;
adev->irq.ih2.ring_size = 0;
/* initialize ih control register offset */
ih_v7_0_init_register_offset(adev);
r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, true);
if (r)
return r;
r = amdgpu_irq_init(adev);
return r;
}
static int ih_v7_0_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_irq_fini_sw(adev);
return 0;
}
static int ih_v7_0_hw_init(void *handle)
{
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
r = ih_v7_0_irq_init(adev);
if (r)
return r;
return 0;
}
static int ih_v7_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
ih_v7_0_irq_disable(adev);
return 0;
}
static int ih_v7_0_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
return ih_v7_0_hw_fini(adev);
}
static int ih_v7_0_resume(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
return ih_v7_0_hw_init(adev);
}
static bool ih_v7_0_is_idle(void *handle)
{
/* todo */
return true;
}
static int ih_v7_0_wait_for_idle(void *handle)
{
/* todo */
return -ETIMEDOUT;
}
static int ih_v7_0_soft_reset(void *handle)
{
/* todo */
return 0;
}
static void ih_v7_0_update_clockgating_state(struct amdgpu_device *adev,
bool enable)
{
uint32_t data, def, field_val;
if (adev->cg_flags & AMD_CG_SUPPORT_IH_CG) {
def = data = RREG32_SOC15(OSSSYS, 0, regIH_CLK_CTRL);
field_val = enable ? 0 : 1;
data = REG_SET_FIELD(data, IH_CLK_CTRL,
DBUS_MUX_CLK_SOFT_OVERRIDE, field_val);
data = REG_SET_FIELD(data, IH_CLK_CTRL,
OSSSYS_SHARE_CLK_SOFT_OVERRIDE, field_val);
data = REG_SET_FIELD(data, IH_CLK_CTRL,
LIMIT_SMN_CLK_SOFT_OVERRIDE, field_val);
data = REG_SET_FIELD(data, IH_CLK_CTRL,
DYN_CLK_SOFT_OVERRIDE, field_val);
data = REG_SET_FIELD(data, IH_CLK_CTRL,
REG_CLK_SOFT_OVERRIDE, field_val);
if (def != data)
WREG32_SOC15(OSSSYS, 0, regIH_CLK_CTRL, data);
}
return;
}
static int ih_v7_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
ih_v7_0_update_clockgating_state(adev,
state == AMD_CG_STATE_GATE);
return 0;
}
static void ih_v7_0_update_ih_mem_power_gating(struct amdgpu_device *adev,
bool enable)
{
uint32_t ih_mem_pwr_cntl;
/* Disable ih sram power cntl before switch powergating mode */
ih_mem_pwr_cntl = RREG32_SOC15(OSSSYS, 0, regIH_MEM_POWER_CTRL);
ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
IH_BUFFER_MEM_POWER_CTRL_EN, 0);
WREG32_SOC15(OSSSYS, 0, regIH_MEM_POWER_CTRL, ih_mem_pwr_cntl);
/* It is recommended to set mem powergating mode to DS mode */
if (enable) {
/* mem power mode */
ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
IH_BUFFER_MEM_POWER_LS_EN, 0);
ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
IH_BUFFER_MEM_POWER_DS_EN, 1);
ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
IH_BUFFER_MEM_POWER_SD_EN, 0);
/* cam mem power mode */
ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
IH_RETRY_INT_CAM_MEM_POWER_LS_EN, 0);
ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
IH_RETRY_INT_CAM_MEM_POWER_DS_EN, 1);
ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
IH_RETRY_INT_CAM_MEM_POWER_SD_EN, 0);
/* re-enable power cntl */
ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
IH_BUFFER_MEM_POWER_CTRL_EN, 1);
} else {
/* mem power mode */
ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
IH_BUFFER_MEM_POWER_LS_EN, 0);
ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
IH_BUFFER_MEM_POWER_DS_EN, 0);
ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
IH_BUFFER_MEM_POWER_SD_EN, 0);
/* cam mem power mode */
ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
IH_RETRY_INT_CAM_MEM_POWER_LS_EN, 0);
ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
IH_RETRY_INT_CAM_MEM_POWER_DS_EN, 0);
ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
IH_RETRY_INT_CAM_MEM_POWER_SD_EN, 0);
/* re-enable power cntl*/
ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
IH_BUFFER_MEM_POWER_CTRL_EN, 1);
}
WREG32_SOC15(OSSSYS, 0, regIH_MEM_POWER_CTRL, ih_mem_pwr_cntl);
}
static int ih_v7_0_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = (state == AMD_PG_STATE_GATE);
if (adev->pg_flags & AMD_PG_SUPPORT_IH_SRAM_PG)
ih_v7_0_update_ih_mem_power_gating(adev, enable);
return 0;
}
static void ih_v7_0_get_clockgating_state(void *handle, u64 *flags)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (!RREG32_SOC15(OSSSYS, 0, regIH_CLK_CTRL))
*flags |= AMD_CG_SUPPORT_IH_CG;
return;
}
static const struct amd_ip_funcs ih_v7_0_ip_funcs = {
.name = "ih_v7_0",
.early_init = ih_v7_0_early_init,
.late_init = NULL,
.sw_init = ih_v7_0_sw_init,
.sw_fini = ih_v7_0_sw_fini,
.hw_init = ih_v7_0_hw_init,
.hw_fini = ih_v7_0_hw_fini,
.suspend = ih_v7_0_suspend,
.resume = ih_v7_0_resume,
.is_idle = ih_v7_0_is_idle,
.wait_for_idle = ih_v7_0_wait_for_idle,
.soft_reset = ih_v7_0_soft_reset,
.set_clockgating_state = ih_v7_0_set_clockgating_state,
.set_powergating_state = ih_v7_0_set_powergating_state,
.get_clockgating_state = ih_v7_0_get_clockgating_state,
};
static const struct amdgpu_ih_funcs ih_v7_0_funcs = {
.get_wptr = ih_v7_0_get_wptr,
.decode_iv = amdgpu_ih_decode_iv_helper,
.decode_iv_ts = amdgpu_ih_decode_iv_ts_helper,
.set_rptr = ih_v7_0_set_rptr
};
static void ih_v7_0_set_interrupt_funcs(struct amdgpu_device *adev)
{
adev->irq.ih_funcs = &ih_v7_0_funcs;
}
const struct amdgpu_ip_block_version ih_v7_0_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_IH,
.major = 7,
.minor = 0,
.rev = 0,
.funcs = &ih_v7_0_ip_funcs,
};
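For reference, a minimal sketch of how a new IP block version like this is typically wired up during IP discovery; the exact switch lives in amdgpu_discovery.c and is not part of this hunk, so the case shown is an assumption:
/* Sketch: registration of IH 7.0 keyed off the detected OSSSYS IP version */
switch (amdgpu_ip_version(adev, OSSSYS_HWIP, 0)) {
case IP_VERSION(7, 0, 0):
	amdgpu_device_ip_block_add(adev, &ih_v7_0_ip_block);
	break;
default:
	break;
}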


@ -0,0 +1,28 @@
/*
* Copyright 2023 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef __IH_V7_0_IH_H__
#define __IH_V7_0_IH_H__
extern const struct amdgpu_ip_block_version ih_v7_0_ip_block;
#endif


@ -652,7 +652,7 @@ static void jpeg_v4_0_3_dec_ring_set_wptr(struct amdgpu_ring *ring)
*
* Write a start command to the ring.
*/
static void jpeg_v4_0_3_dec_ring_insert_start(struct amdgpu_ring *ring)
void jpeg_v4_0_3_dec_ring_insert_start(struct amdgpu_ring *ring)
{
if (!amdgpu_sriov_vf(ring->adev)) {
amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
@ -672,7 +672,7 @@ static void jpeg_v4_0_3_dec_ring_insert_start(struct amdgpu_ring *ring)
*
* Write an end command to the ring.
*/
static void jpeg_v4_0_3_dec_ring_insert_end(struct amdgpu_ring *ring)
void jpeg_v4_0_3_dec_ring_insert_end(struct amdgpu_ring *ring)
{
if (!amdgpu_sriov_vf(ring->adev)) {
amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
@ -695,7 +695,7 @@ static void jpeg_v4_0_3_dec_ring_insert_end(struct amdgpu_ring *ring)
*
* Write a fence and a trap command to the ring.
*/
static void jpeg_v4_0_3_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
void jpeg_v4_0_3_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
unsigned int flags)
{
WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
@ -764,7 +764,7 @@ static void jpeg_v4_0_3_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
*
* Write ring commands to execute the indirect buffer.
*/
static void jpeg_v4_0_3_dec_ring_emit_ib(struct amdgpu_ring *ring,
void jpeg_v4_0_3_dec_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_job *job,
struct amdgpu_ib *ib,
uint32_t flags)
@ -815,7 +815,7 @@ static void jpeg_v4_0_3_dec_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, 0x2);
}
static void jpeg_v4_0_3_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
void jpeg_v4_0_3_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
uint32_t val, uint32_t mask)
{
uint32_t reg_offset = (reg << 2);
@ -842,7 +842,7 @@ static void jpeg_v4_0_3_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_
amdgpu_ring_write(ring, mask);
}
static void jpeg_v4_0_3_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
void jpeg_v4_0_3_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned int vmid, uint64_t pd_addr)
{
struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
@ -857,7 +857,7 @@ static void jpeg_v4_0_3_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
jpeg_v4_0_3_dec_ring_emit_reg_wait(ring, data0, data1, mask);
}
static void jpeg_v4_0_3_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
void jpeg_v4_0_3_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
{
uint32_t reg_offset = (reg << 2);
@ -875,7 +875,7 @@ static void jpeg_v4_0_3_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t re
amdgpu_ring_write(ring, val);
}
static void jpeg_v4_0_3_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count)
void jpeg_v4_0_3_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count)
{
int i;


@ -48,4 +48,19 @@
extern const struct amdgpu_ip_block_version jpeg_v4_0_3_ip_block;
void jpeg_v4_0_3_dec_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_job *job,
struct amdgpu_ib *ib,
uint32_t flags);
void jpeg_v4_0_3_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
unsigned int flags);
void jpeg_v4_0_3_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned int vmid, uint64_t pd_addr);
void jpeg_v4_0_3_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count);
void jpeg_v4_0_3_dec_ring_insert_start(struct amdgpu_ring *ring);
void jpeg_v4_0_3_dec_ring_insert_end(struct amdgpu_ring *ring);
void jpeg_v4_0_3_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
void jpeg_v4_0_3_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
uint32_t val, uint32_t mask);
#endif /* __JPEG_V4_0_3_H__ */


@ -34,7 +34,17 @@
#include "vcn/vcn_4_0_5_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_4_0.h"
#define regUVD_JPEG_PITCH_INTERNAL_OFFSET 0x401f
#define mmUVD_DPG_LMA_CTL regUVD_DPG_LMA_CTL
#define mmUVD_DPG_LMA_CTL_BASE_IDX regUVD_DPG_LMA_CTL_BASE_IDX
#define mmUVD_DPG_LMA_DATA regUVD_DPG_LMA_DATA
#define mmUVD_DPG_LMA_DATA_BASE_IDX regUVD_DPG_LMA_DATA_BASE_IDX
#define regUVD_JPEG_PITCH_INTERNAL_OFFSET 0x401f
#define regJPEG_DEC_GFX10_ADDR_CONFIG_INTERNAL_OFFSET 0x4026
#define regJPEG_SYS_INT_EN_INTERNAL_OFFSET 0x4141
#define regJPEG_CGC_CTRL_INTERNAL_OFFSET 0x4161
#define regJPEG_CGC_GATE_INTERNAL_OFFSET 0x4160
#define regUVD_NO_OP_INTERNAL_OFFSET 0x0029
static void jpeg_v4_0_5_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v4_0_5_set_irq_funcs(struct amdgpu_device *adev);
@ -155,11 +165,18 @@ static int jpeg_v4_0_5_hw_init(void *handle)
struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
int r;
// TODO: Enable ring test with DPG support
if (adev->pg_flags & AMD_PG_SUPPORT_JPEG_DPG) {
DRM_DEV_INFO(adev->dev, "JPEG decode initialized successfully under DPG Mode\n");
return 0;
}
r = amdgpu_ring_test_helper(ring);
if (r)
return r;
DRM_DEV_INFO(adev->dev, "JPEG decode initialized successfully.\n");
DRM_INFO("JPEG decode initialized successfully under SPG Mode\n");
return 0;
}
@ -227,11 +244,11 @@ static int jpeg_v4_0_5_resume(void *handle)
return r;
}
static void jpeg_v4_0_5_disable_clock_gating(struct amdgpu_device *adev)
static void jpeg_v4_0_5_disable_clock_gating(struct amdgpu_device *adev, int inst)
{
uint32_t data = 0;
data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL);
data = RREG32_SOC15(JPEG, inst, regJPEG_CGC_CTRL);
if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) {
data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
data &= (~JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK);
@ -241,21 +258,21 @@ static void jpeg_v4_0_5_disable_clock_gating(struct amdgpu_device *adev)
data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
WREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL, data);
WREG32_SOC15(JPEG, inst, regJPEG_CGC_CTRL, data);
data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE);
data = RREG32_SOC15(JPEG, inst, regJPEG_CGC_GATE);
data &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK
| JPEG_CGC_GATE__JPEG2_DEC_MASK
| JPEG_CGC_GATE__JMCIF_MASK
| JPEG_CGC_GATE__JRBBM_MASK);
WREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE, data);
WREG32_SOC15(JPEG, inst, regJPEG_CGC_GATE, data);
}
static void jpeg_v4_0_5_enable_clock_gating(struct amdgpu_device *adev)
static void jpeg_v4_0_5_enable_clock_gating(struct amdgpu_device *adev, int inst)
{
uint32_t data = 0;
data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL);
data = RREG32_SOC15(JPEG, inst, regJPEG_CGC_CTRL);
if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) {
data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
data |= JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK;
@ -265,47 +282,66 @@ static void jpeg_v4_0_5_enable_clock_gating(struct amdgpu_device *adev)
data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
WREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL, data);
WREG32_SOC15(JPEG, inst, regJPEG_CGC_CTRL, data);
data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE);
data = RREG32_SOC15(JPEG, inst, regJPEG_CGC_GATE);
data |= (JPEG_CGC_GATE__JPEG_DEC_MASK
|JPEG_CGC_GATE__JPEG2_DEC_MASK
|JPEG_CGC_GATE__JMCIF_MASK
|JPEG_CGC_GATE__JRBBM_MASK);
WREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE, data);
WREG32_SOC15(JPEG, inst, regJPEG_CGC_GATE, data);
}
static int jpeg_v4_0_5_disable_static_power_gating(struct amdgpu_device *adev)
static void jpeg_engine_4_0_5_dpg_clock_gating_mode(struct amdgpu_device *adev,
int inst_idx, uint8_t indirect)
{
uint32_t data = 0;
if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG)
data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
else
data |= 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
WREG32_SOC15_JPEG_DPG_MODE(inst_idx, regJPEG_CGC_CTRL_INTERNAL_OFFSET, data, indirect);
data = 0;
WREG32_SOC15_JPEG_DPG_MODE(inst_idx, regJPEG_CGC_GATE_INTERNAL_OFFSET,
data, indirect);
}
static int jpeg_v4_0_5_disable_static_power_gating(struct amdgpu_device *adev, int inst)
{
if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
WREG32(SOC15_REG_OFFSET(JPEG, 0, regUVD_IPX_DLDO_CONFIG),
WREG32(SOC15_REG_OFFSET(JPEG, inst, regUVD_IPX_DLDO_CONFIG),
1 << UVD_IPX_DLDO_CONFIG__ONO1_PWR_CONFIG__SHIFT);
SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_IPX_DLDO_STATUS,
SOC15_WAIT_ON_RREG(JPEG, inst, regUVD_IPX_DLDO_STATUS,
0, UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS_MASK);
}
/* disable anti hang mechanism */
WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS), 0,
WREG32_P(SOC15_REG_OFFSET(JPEG, inst, regUVD_JPEG_POWER_STATUS), 0,
~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
/* keep the JPEG in static PG mode */
WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS), 0,
WREG32_P(SOC15_REG_OFFSET(JPEG, inst, regUVD_JPEG_POWER_STATUS), 0,
~UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK);
return 0;
}
static int jpeg_v4_0_5_enable_static_power_gating(struct amdgpu_device *adev)
static int jpeg_v4_0_5_enable_static_power_gating(struct amdgpu_device *adev, int inst)
{
/* enable anti hang mechanism */
WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS),
WREG32_P(SOC15_REG_OFFSET(JPEG, inst, regUVD_JPEG_POWER_STATUS),
UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
WREG32(SOC15_REG_OFFSET(JPEG, 0, regUVD_IPX_DLDO_CONFIG),
WREG32(SOC15_REG_OFFSET(JPEG, inst, regUVD_IPX_DLDO_CONFIG),
2 << UVD_IPX_DLDO_CONFIG__ONO1_PWR_CONFIG__SHIFT);
SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_IPX_DLDO_STATUS,
SOC15_WAIT_ON_RREG(JPEG, inst, regUVD_IPX_DLDO_STATUS,
1 << UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS__SHIFT,
UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS_MASK);
}
@ -313,6 +349,88 @@ static int jpeg_v4_0_5_enable_static_power_gating(struct amdgpu_device *adev)
return 0;
}
/**
* jpeg_v4_0_5_start_dpg_mode - JPEG start with dpg mode
*
* @adev: amdgpu_device pointer
* @inst_idx: instance number index
* @indirect: indirectly write sram
*
* Start JPEG block with dpg mode
*/
static void jpeg_v4_0_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
struct amdgpu_ring *ring = adev->jpeg.inst[inst_idx].ring_dec;
uint32_t reg_data = 0;
/* enable anti hang mechanism */
reg_data = RREG32_SOC15(JPEG, inst_idx, regUVD_JPEG_POWER_STATUS);
reg_data &= ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK;
reg_data |= 0x1;
WREG32_SOC15(JPEG, inst_idx, regUVD_JPEG_POWER_STATUS, reg_data);
if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
WREG32(SOC15_REG_OFFSET(JPEG, inst_idx, regUVD_IPX_DLDO_CONFIG),
2 << UVD_IPX_DLDO_CONFIG__ONO1_PWR_CONFIG__SHIFT);
SOC15_WAIT_ON_RREG(JPEG, inst_idx, regUVD_IPX_DLDO_STATUS,
1 << UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS__SHIFT,
UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS_MASK);
}
reg_data = RREG32_SOC15(JPEG, inst_idx, regUVD_JPEG_POWER_STATUS);
reg_data |= UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK;
WREG32_SOC15(JPEG, inst_idx, regUVD_JPEG_POWER_STATUS, reg_data);
if (indirect)
adev->jpeg.inst[inst_idx].dpg_sram_curr_addr =
(uint32_t *)adev->jpeg.inst[inst_idx].dpg_sram_cpu_addr;
jpeg_engine_4_0_5_dpg_clock_gating_mode(adev, inst_idx, indirect);
/* MJPEG global tiling registers */
WREG32_SOC15_JPEG_DPG_MODE(inst_idx, regJPEG_DEC_GFX10_ADDR_CONFIG_INTERNAL_OFFSET,
adev->gfx.config.gb_addr_config, indirect);
/* enable System Interrupt for JRBC */
WREG32_SOC15_JPEG_DPG_MODE(inst_idx, regJPEG_SYS_INT_EN_INTERNAL_OFFSET,
JPEG_SYS_INT_EN__DJRBC_MASK, indirect);
/* add nop to workaround PSP size check */
WREG32_SOC15_JPEG_DPG_MODE(inst_idx, regUVD_NO_OP_INTERNAL_OFFSET, 0, indirect);
if (indirect)
amdgpu_jpeg_psp_update_sram(adev, inst_idx, 0);
WREG32_SOC15(JPEG, inst_idx, regUVD_LMI_JRBC_RB_VMID, 0);
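/* 0x1 | 0x2 correspond to RB_NO_FETCH | RB_RPTR_WR_EN: park the ring
 * while it is reprogrammed; fetch is re-enabled by the plain 0x2 write
 * below. (Mask names assumed from the UVD_JRBC_RB_CNTL fields used by
 * earlier JPEG generations.) */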
WREG32_SOC15(JPEG, inst_idx, regUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
WREG32_SOC15(JPEG, inst_idx, regUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
lower_32_bits(ring->gpu_addr));
WREG32_SOC15(JPEG, inst_idx, regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
upper_32_bits(ring->gpu_addr));
WREG32_SOC15(JPEG, inst_idx, regUVD_JRBC_RB_RPTR, 0);
WREG32_SOC15(JPEG, inst_idx, regUVD_JRBC_RB_WPTR, 0);
WREG32_SOC15(JPEG, inst_idx, regUVD_JRBC_RB_CNTL, 0x00000002L);
WREG32_SOC15(JPEG, inst_idx, regUVD_JRBC_RB_SIZE, ring->ring_size / 4);
ring->wptr = RREG32_SOC15(JPEG, inst_idx, regUVD_JRBC_RB_WPTR);
}
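The indirect writes above rely on the JPEG DPG SRAM helpers added with this series; a simplified sketch of what each WREG32_SOC15_JPEG_DPG_MODE(..., indirect) call is assumed to do (field names from the dpg_sram_* members used above; the helper name is hypothetical):
/* In indirect mode, stage (offset, value) pairs into the per-instance
 * DPG SRAM image instead of touching the register bus;
 * amdgpu_jpeg_psp_update_sram() later asks the PSP to apply the whole
 * image while the engine is power gated. */
static inline void jpeg_dpg_sram_stage(struct amdgpu_device *adev,
				       int inst_idx, uint32_t offset,
				       uint32_t value)
{
	*adev->jpeg.inst[inst_idx].dpg_sram_curr_addr++ = offset;
	*adev->jpeg.inst[inst_idx].dpg_sram_curr_addr++ = value;
}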
/**
* jpeg_v4_0_5_stop_dpg_mode - JPEG stop with dpg mode
*
* @adev: amdgpu_device pointer
* @inst_idx: instance number index
*
* Stop JPEG block with dpg mode
*/
static void jpeg_v4_0_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
{
uint32_t reg_data = 0;
reg_data = RREG32_SOC15(JPEG, inst_idx, regUVD_JPEG_POWER_STATUS);
reg_data &= ~UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK;
WREG32_SOC15(JPEG, inst_idx, regUVD_JPEG_POWER_STATUS, reg_data);
}
/**
* jpeg_v4_0_5_start - start JPEG block
*
@ -323,52 +441,58 @@ static int jpeg_v4_0_5_enable_static_power_gating(struct amdgpu_device *adev)
static int jpeg_v4_0_5_start(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
int r;
int r, i;
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_jpeg(adev, true);
/* doorbell programming is done for every playback */
adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
(adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0);
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
/* doorbell programming is done for every playback */
adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
(adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i, i);
WREG32_SOC15(VCN, 0, regVCN_JPEG_DB_CTRL,
ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT |
VCN_JPEG_DB_CTRL__EN_MASK);
WREG32_SOC15(VCN, i, regVCN_JPEG_DB_CTRL,
ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT |
VCN_JPEG_DB_CTRL__EN_MASK);
/* disable power gating */
r = jpeg_v4_0_5_disable_static_power_gating(adev);
if (r)
return r;
if (adev->pg_flags & AMD_PG_SUPPORT_JPEG_DPG) {
jpeg_v4_0_5_start_dpg_mode(adev, i, adev->jpeg.indirect_sram);
continue;
}
/* JPEG disable CGC */
jpeg_v4_0_5_disable_clock_gating(adev);
/* disable power gating */
r = jpeg_v4_0_5_disable_static_power_gating(adev, i);
if (r)
return r;
/* MJPEG global tiling registers */
WREG32_SOC15(JPEG, 0, regJPEG_DEC_GFX10_ADDR_CONFIG,
adev->gfx.config.gb_addr_config);
/* JPEG disable CGC */
jpeg_v4_0_5_disable_clock_gating(adev, i);
/* MJPEG global tiling registers */
WREG32_SOC15(JPEG, i, regJPEG_DEC_GFX10_ADDR_CONFIG,
adev->gfx.config.gb_addr_config);
/* enable JMI channel */
WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JMI_CNTL), 0,
~UVD_JMI_CNTL__SOFT_RESET_MASK);
/* enable JMI channel */
WREG32_P(SOC15_REG_OFFSET(JPEG, i, regUVD_JMI_CNTL), 0,
~UVD_JMI_CNTL__SOFT_RESET_MASK);
/* enable System Interrupt for JRBC */
WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regJPEG_SYS_INT_EN),
JPEG_SYS_INT_EN__DJRBC_MASK,
~JPEG_SYS_INT_EN__DJRBC_MASK);
/* enable System Interrupt for JRBC */
WREG32_P(SOC15_REG_OFFSET(JPEG, i, regJPEG_SYS_INT_EN),
JPEG_SYS_INT_EN__DJRBC_MASK,
~JPEG_SYS_INT_EN__DJRBC_MASK);
WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_VMID, 0);
WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
lower_32_bits(ring->gpu_addr));
WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
upper_32_bits(ring->gpu_addr));
WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_RPTR, 0);
WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR, 0);
WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_CNTL, 0x00000002L);
WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_SIZE, ring->ring_size / 4);
ring->wptr = RREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR);
WREG32_SOC15(JPEG, i, regUVD_LMI_JRBC_RB_VMID, 0);
WREG32_SOC15(JPEG, i, regUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
WREG32_SOC15(JPEG, i, regUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
lower_32_bits(ring->gpu_addr));
WREG32_SOC15(JPEG, i, regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
upper_32_bits(ring->gpu_addr));
WREG32_SOC15(JPEG, i, regUVD_JRBC_RB_RPTR, 0);
WREG32_SOC15(JPEG, i, regUVD_JRBC_RB_WPTR, 0);
WREG32_SOC15(JPEG, i, regUVD_JRBC_RB_CNTL, 0x00000002L);
WREG32_SOC15(JPEG, i, regUVD_JRBC_RB_SIZE, ring->ring_size / 4);
ring->wptr = RREG32_SOC15(JPEG, i, regUVD_JRBC_RB_WPTR);
}
return 0;
}
@ -382,19 +506,26 @@ static int jpeg_v4_0_5_start(struct amdgpu_device *adev)
*/
static int jpeg_v4_0_5_stop(struct amdgpu_device *adev)
{
int r;
int r, i;
/* reset JMI */
WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JMI_CNTL),
UVD_JMI_CNTL__SOFT_RESET_MASK,
~UVD_JMI_CNTL__SOFT_RESET_MASK);
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
if (adev->pg_flags & AMD_PG_SUPPORT_JPEG_DPG) {
jpeg_v4_0_5_enable_clock_gating(adev);
jpeg_v4_0_5_stop_dpg_mode(adev, i);
continue;
}
/* reset JMI */
WREG32_P(SOC15_REG_OFFSET(JPEG, i, regUVD_JMI_CNTL),
UVD_JMI_CNTL__SOFT_RESET_MASK,
~UVD_JMI_CNTL__SOFT_RESET_MASK);
/* enable power gating */
r = jpeg_v4_0_5_enable_static_power_gating(adev);
if (r)
return r;
jpeg_v4_0_5_enable_clock_gating(adev, i);
/* enable power gating */
r = jpeg_v4_0_5_enable_static_power_gating(adev, i);
if (r)
return r;
}
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_jpeg(adev, false);
@ -478,13 +609,20 @@ static int jpeg_v4_0_5_set_clockgating_state(void *handle,
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = (state == AMD_CG_STATE_GATE);
int i;
if (enable) {
if (!jpeg_v4_0_5_is_idle(handle))
return -EBUSY;
jpeg_v4_0_5_enable_clock_gating(adev);
} else {
jpeg_v4_0_5_disable_clock_gating(adev);
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
if (adev->jpeg.harvest_config & (1 << i))
continue;
if (enable) {
if (!jpeg_v4_0_5_is_idle(handle))
return -EBUSY;
jpeg_v4_0_5_enable_clock_gating(adev, i);
} else {
jpeg_v4_0_5_disable_clock_gating(adev, i);
}
}
return 0;
@ -589,8 +727,15 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_5_dec_ring_vm_funcs = {
static void jpeg_v4_0_5_set_dec_ring_funcs(struct amdgpu_device *adev)
{
adev->jpeg.inst->ring_dec->funcs = &jpeg_v4_0_5_dec_ring_vm_funcs;
DRM_DEV_INFO(adev->dev, "JPEG decode is enabled in VM mode\n");
int i;
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
if (adev->jpeg.harvest_config & (1 << i))
continue;
adev->jpeg.inst[i].ring_dec->funcs = &jpeg_v4_0_5_dec_ring_vm_funcs;
DRM_DEV_INFO(adev->dev, "JPEG%d decode is enabled in VM mode\n", i);
}
}
static const struct amdgpu_irq_src_funcs jpeg_v4_0_5_irq_funcs = {
@ -599,8 +744,15 @@ static const struct amdgpu_irq_src_funcs jpeg_v4_0_5_irq_funcs = {
static void jpeg_v4_0_5_set_irq_funcs(struct amdgpu_device *adev)
{
adev->jpeg.inst->irq.num_types = 1;
adev->jpeg.inst->irq.funcs = &jpeg_v4_0_5_irq_funcs;
int i;
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
if (adev->jpeg.harvest_config & (1 << i))
continue;
adev->jpeg.inst[i].irq.num_types = 1;
adev->jpeg.inst[i].irq.funcs = &jpeg_v4_0_5_irq_funcs;
}
}
const struct amdgpu_ip_block_version jpeg_v4_0_5_ip_block = {


@ -0,0 +1,570 @@
/*
* Copyright 2023 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu.h"
#include "amdgpu_jpeg.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "jpeg_v4_0_3.h"
#include "vcn/vcn_5_0_0_offset.h"
#include "vcn/vcn_5_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_4_0.h"
static void jpeg_v5_0_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v5_0_0_set_irq_funcs(struct amdgpu_device *adev);
static int jpeg_v5_0_0_set_powergating_state(void *handle,
enum amd_powergating_state state);
/**
* jpeg_v5_0_0_early_init - set function pointers
*
* @handle: amdgpu_device pointer
*
* Set ring and irq function pointers
*/
static int jpeg_v5_0_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
adev->jpeg.num_jpeg_inst = 1;
adev->jpeg.num_jpeg_rings = 1;
jpeg_v5_0_0_set_dec_ring_funcs(adev);
jpeg_v5_0_0_set_irq_funcs(adev);
return 0;
}
/**
* jpeg_v5_0_0_sw_init - sw init for JPEG block
*
* @handle: amdgpu_device pointer
*
* Load firmware and sw initialization
*/
static int jpeg_v5_0_0_sw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring;
int r;
/* JPEG TRAP */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
VCN_4_0__SRCID__JPEG_DECODE, &adev->jpeg.inst->irq);
if (r)
return r;
r = amdgpu_jpeg_sw_init(adev);
if (r)
return r;
r = amdgpu_jpeg_resume(adev);
if (r)
return r;
ring = adev->jpeg.inst->ring_dec;
ring->use_doorbell = true;
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1;
ring->vm_hub = AMDGPU_MMHUB0(0);
sprintf(ring->name, "jpeg_dec");
r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0,
AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
adev->jpeg.internal.jpeg_pitch[0] = regUVD_JPEG_PITCH_INTERNAL_OFFSET;
adev->jpeg.inst->external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_PITCH);
return 0;
}
/**
* jpeg_v5_0_0_sw_fini - sw fini for JPEG block
*
* @handle: amdgpu_device pointer
*
* JPEG suspend and free up sw allocation
*/
static int jpeg_v5_0_0_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
r = amdgpu_jpeg_suspend(adev);
if (r)
return r;
r = amdgpu_jpeg_sw_fini(adev);
return r;
}
/**
* jpeg_v5_0_0_hw_init - start and test JPEG block
*
* @handle: amdgpu_device pointer
*
*/
static int jpeg_v5_0_0_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
int r;
adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
(adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0);
WREG32_SOC15(VCN, 0, regVCN_JPEG_DB_CTRL,
ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT |
VCN_JPEG_DB_CTRL__EN_MASK);
r = amdgpu_ring_test_helper(ring);
if (r)
return r;
DRM_DEV_INFO(adev->dev, "JPEG decode initialized successfully.\n");
return 0;
}
/**
* jpeg_v5_0_0_hw_fini - stop the hardware block
*
* @handle: amdgpu_device pointer
*
* Stop the JPEG block, mark ring as not ready any more
*/
static int jpeg_v5_0_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
cancel_delayed_work_sync(&adev->jpeg.idle_work);
if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS))
jpeg_v5_0_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
return 0;
}
/**
* jpeg_v5_0_0_suspend - suspend JPEG block
*
* @handle: amdgpu_device pointer
*
* HW fini and suspend JPEG block
*/
static int jpeg_v5_0_0_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
r = jpeg_v5_0_0_hw_fini(adev);
if (r)
return r;
r = amdgpu_jpeg_suspend(adev);
return r;
}
/**
* jpeg_v5_0_0_resume - resume JPEG block
*
* @handle: amdgpu_device pointer
*
* Resume firmware and hw init JPEG block
*/
static int jpeg_v5_0_0_resume(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
r = amdgpu_jpeg_resume(adev);
if (r)
return r;
r = jpeg_v5_0_0_hw_init(adev);
return r;
}
static void jpeg_v5_0_0_disable_clock_gating(struct amdgpu_device *adev)
{
uint32_t data = 0;
WREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE, data);
data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL);
data &= ~(JPEG_CGC_CTRL__JPEG0_DEC_MODE_MASK
| JPEG_CGC_CTRL__JPEG_ENC_MODE_MASK);
WREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL, data);
}
static void jpeg_v5_0_0_enable_clock_gating(struct amdgpu_device *adev)
{
uint32_t data = 0;
data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL);
data |= 1 << JPEG_CGC_CTRL__JPEG0_DEC_MODE__SHIFT;
WREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL, data);
data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE);
data |= (JPEG_CGC_GATE__JPEG0_DEC_MASK
|JPEG_CGC_GATE__JPEG_ENC_MASK
|JPEG_CGC_GATE__JMCIF_MASK
|JPEG_CGC_GATE__JRBBM_MASK);
WREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE, data);
}
static int jpeg_v5_0_0_disable_static_power_gating(struct amdgpu_device *adev)
{
uint32_t data = 0;
data = 1 << UVD_IPX_DLDO_CONFIG__ONO1_PWR_CONFIG__SHIFT;
WREG32_SOC15(JPEG, 0, regUVD_IPX_DLDO_CONFIG, data);
SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_IPX_DLDO_STATUS, 0,
UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS_MASK);
/* disable anti hang mechanism */
WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS), 0,
~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
/* keep the JPEG in static PG mode */
WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS), 0,
~UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK);
return 0;
}
static int jpeg_v5_0_0_enable_static_power_gating(struct amdgpu_device *adev)
{
/* enable anti hang mechanism */
WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS),
UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
WREG32(SOC15_REG_OFFSET(JPEG, 0, regUVD_IPX_DLDO_CONFIG),
2 << UVD_IPX_DLDO_CONFIG__ONO1_PWR_CONFIG__SHIFT);
SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_IPX_DLDO_STATUS,
1 << UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS__SHIFT,
UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS_MASK);
}
return 0;
}
/**
* jpeg_v5_0_0_start - start JPEG block
*
* @adev: amdgpu_device pointer
*
* Setup and start the JPEG block
*/
static int jpeg_v5_0_0_start(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
int r;
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_jpeg(adev, true);
/* disable power gating */
r = jpeg_v5_0_0_disable_static_power_gating(adev);
if (r)
return r;
/* JPEG disable CGC */
jpeg_v5_0_0_disable_clock_gating(adev);
/* MJPEG global tiling registers */
WREG32_SOC15(JPEG, 0, regJPEG_DEC_GFX10_ADDR_CONFIG,
adev->gfx.config.gb_addr_config);
/* enable JMI channel */
WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JMI_CNTL), 0,
~UVD_JMI_CNTL__SOFT_RESET_MASK);
/* enable System Interrupt for JRBC */
WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regJPEG_SYS_INT_EN),
JPEG_SYS_INT_EN__DJRBC0_MASK,
~JPEG_SYS_INT_EN__DJRBC0_MASK);
WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_VMID, 0);
WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
lower_32_bits(ring->gpu_addr));
WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
upper_32_bits(ring->gpu_addr));
WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_RPTR, 0);
WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR, 0);
WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_CNTL, 0x00000002L);
WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_SIZE, ring->ring_size / 4);
ring->wptr = RREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR);
return 0;
}
/**
* jpeg_v5_0_0_stop - stop JPEG block
*
* @adev: amdgpu_device pointer
*
* stop the JPEG block
*/
static int jpeg_v5_0_0_stop(struct amdgpu_device *adev)
{
int r;
/* reset JMI */
WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JMI_CNTL),
UVD_JMI_CNTL__SOFT_RESET_MASK,
~UVD_JMI_CNTL__SOFT_RESET_MASK);
jpeg_v5_0_0_enable_clock_gating(adev);
/* enable power gating */
r = jpeg_v5_0_0_enable_static_power_gating(adev);
if (r)
return r;
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_jpeg(adev, false);
return 0;
}
/**
* jpeg_v5_0_0_dec_ring_get_rptr - get read pointer
*
* @ring: amdgpu_ring pointer
*
* Returns the current hardware read pointer
*/
static uint64_t jpeg_v5_0_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
return RREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_RPTR);
}
/**
* jpeg_v5_0_0_dec_ring_get_wptr - get write pointer
*
* @ring: amdgpu_ring pointer
*
* Returns the current hardware write pointer
*/
static uint64_t jpeg_v5_0_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
if (ring->use_doorbell)
return *ring->wptr_cpu_addr;
else
return RREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR);
}
/**
* jpeg_v5_0_0_dec_ring_set_wptr - set write pointer
*
* @ring: amdgpu_ring pointer
*
* Commits the write pointer to the hardware
*/
static void jpeg_v5_0_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
if (ring->use_doorbell) {
*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
} else {
WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
}
}
static bool jpeg_v5_0_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int ret = 1;
ret &= (((RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS) &
UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
UVD_JRBC_STATUS__RB_JOB_DONE_MASK));
return ret;
}
static int jpeg_v5_0_0_wait_for_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
return SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_JRBC_STATUS,
UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
}
static int jpeg_v5_0_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = (state == AMD_CG_STATE_GATE);
if (enable) {
if (!jpeg_v5_0_0_is_idle(handle))
return -EBUSY;
jpeg_v5_0_0_enable_clock_gating(adev);
} else {
jpeg_v5_0_0_disable_clock_gating(adev);
}
return 0;
}
static int jpeg_v5_0_0_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int ret;
if (state == adev->jpeg.cur_state)
return 0;
if (state == AMD_PG_STATE_GATE)
ret = jpeg_v5_0_0_stop(adev);
else
ret = jpeg_v5_0_0_start(adev);
if (!ret)
adev->jpeg.cur_state = state;
return ret;
}
static int jpeg_v5_0_0_set_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned int type,
enum amdgpu_interrupt_state state)
{
return 0;
}
static int jpeg_v5_0_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
DRM_DEBUG("IH: JPEG TRAP\n");
switch (entry->src_id) {
case VCN_4_0__SRCID__JPEG_DECODE:
amdgpu_fence_process(adev->jpeg.inst->ring_dec);
break;
default:
DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
entry->src_id, entry->src_data[0]);
break;
}
return 0;
}
static const struct amd_ip_funcs jpeg_v5_0_0_ip_funcs = {
.name = "jpeg_v5_0_0",
.early_init = jpeg_v5_0_0_early_init,
.late_init = NULL,
.sw_init = jpeg_v5_0_0_sw_init,
.sw_fini = jpeg_v5_0_0_sw_fini,
.hw_init = jpeg_v5_0_0_hw_init,
.hw_fini = jpeg_v5_0_0_hw_fini,
.suspend = jpeg_v5_0_0_suspend,
.resume = jpeg_v5_0_0_resume,
.is_idle = jpeg_v5_0_0_is_idle,
.wait_for_idle = jpeg_v5_0_0_wait_for_idle,
.check_soft_reset = NULL,
.pre_soft_reset = NULL,
.soft_reset = NULL,
.post_soft_reset = NULL,
.set_clockgating_state = jpeg_v5_0_0_set_clockgating_state,
.set_powergating_state = jpeg_v5_0_0_set_powergating_state,
};
static const struct amdgpu_ring_funcs jpeg_v5_0_0_dec_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_JPEG,
.align_mask = 0xf,
.get_rptr = jpeg_v5_0_0_dec_ring_get_rptr,
.get_wptr = jpeg_v5_0_0_dec_ring_get_wptr,
.set_wptr = jpeg_v5_0_0_dec_ring_set_wptr,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
8 + /* jpeg_v5_0_0_dec_ring_emit_vm_flush */
22 + 22 + /* jpeg_v5_0_0_dec_ring_emit_fence x2 vm fence */
8 + 16,
.emit_ib_size = 22, /* jpeg_v5_0_0_dec_ring_emit_ib */
.emit_ib = jpeg_v4_0_3_dec_ring_emit_ib,
.emit_fence = jpeg_v4_0_3_dec_ring_emit_fence,
.emit_vm_flush = jpeg_v4_0_3_dec_ring_emit_vm_flush,
.test_ring = amdgpu_jpeg_dec_ring_test_ring,
.test_ib = amdgpu_jpeg_dec_ring_test_ib,
.insert_nop = jpeg_v4_0_3_dec_ring_nop,
.insert_start = jpeg_v4_0_3_dec_ring_insert_start,
.insert_end = jpeg_v4_0_3_dec_ring_insert_end,
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_jpeg_ring_begin_use,
.end_use = amdgpu_jpeg_ring_end_use,
.emit_wreg = jpeg_v4_0_3_dec_ring_emit_wreg,
.emit_reg_wait = jpeg_v4_0_3_dec_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
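/* Note: the .emit_*, .insert_* and .insert_nop callbacks above reuse
 * the jpeg_v4_0_3 helpers, which is why those functions were made
 * non-static and exported through jpeg_v4_0_3.h earlier in this
 * change. */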
static void jpeg_v5_0_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
adev->jpeg.inst->ring_dec->funcs = &jpeg_v5_0_0_dec_ring_vm_funcs;
DRM_DEV_INFO(adev->dev, "JPEG decode is enabled in VM mode\n");
}
static const struct amdgpu_irq_src_funcs jpeg_v5_0_0_irq_funcs = {
.set = jpeg_v5_0_0_set_interrupt_state,
.process = jpeg_v5_0_0_process_interrupt,
};
static void jpeg_v5_0_0_set_irq_funcs(struct amdgpu_device *adev)
{
adev->jpeg.inst->irq.num_types = 1;
adev->jpeg.inst->irq.funcs = &jpeg_v5_0_0_irq_funcs;
}
const struct amdgpu_ip_block_version jpeg_v5_0_0_ip_block = {
.type = AMD_IP_BLOCK_TYPE_JPEG,
.major = 5,
.minor = 0,
.rev = 0,
.funcs = &jpeg_v5_0_0_ip_funcs,
};


@ -0,0 +1,29 @@
/*
* Copyright 2023 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef __JPEG_V5_0_0_H__
#define __JPEG_V5_0_0_H__
extern const struct amdgpu_ip_block_version jpeg_v5_0_0_ip_block;
#endif /* __JPEG_V5_0_0_H__ */


@ -0,0 +1,121 @@
/*
* Copyright 2023 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/delay.h>
#include "amdgpu.h"
#include "lsdma_v7_0.h"
#include "amdgpu_lsdma.h"
#include "lsdma/lsdma_7_0_0_offset.h"
#include "lsdma/lsdma_7_0_0_sh_mask.h"
static int lsdma_v7_0_wait_pio_status(struct amdgpu_device *adev)
{
return amdgpu_lsdma_wait_for(adev, SOC15_REG_OFFSET(LSDMA, 0, regLSDMA_PIO_STATUS),
LSDMA_PIO_STATUS__PIO_IDLE_MASK | LSDMA_PIO_STATUS__PIO_FIFO_EMPTY_MASK,
LSDMA_PIO_STATUS__PIO_IDLE_MASK | LSDMA_PIO_STATUS__PIO_FIFO_EMPTY_MASK);
}
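amdgpu_lsdma_wait_for() above is the shared polling helper from amdgpu_lsdma.c; a minimal sketch of the behavior assumed here (hypothetical name, not the verbatim implementation):
/* Poll a register until the masked value matches, bounded by the
 * device timeout. */
static int lsdma_wait_for_sketch(struct amdgpu_device *adev,
				 uint32_t reg_index, uint32_t reg_val,
				 uint32_t mask)
{
	int i;
	for (i = 0; i < adev->usec_timeout; i++) {
		if ((RREG32(reg_index) & mask) == reg_val)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}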
static int lsdma_v7_0_copy_mem(struct amdgpu_device *adev,
uint64_t src_addr,
uint64_t dst_addr,
uint64_t size)
{
int ret;
uint32_t tmp;
WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_SRC_ADDR_LO, lower_32_bits(src_addr));
WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_SRC_ADDR_HI, upper_32_bits(src_addr));
WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_DST_ADDR_LO, lower_32_bits(dst_addr));
WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_DST_ADDR_HI, upper_32_bits(dst_addr));
WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_CONTROL, 0x0);
tmp = RREG32_SOC15(LSDMA, 0, regLSDMA_PIO_COMMAND);
tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, BYTE_COUNT, size);
tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, SRC_LOCATION, 0);
tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, DST_LOCATION, 0);
tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, SRC_ADDR_INC, 0);
tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, DST_ADDR_INC, 0);
tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, OVERLAP_DISABLE, 0);
tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, CONSTANT_FILL, 0);
WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_COMMAND, tmp);
ret = lsdma_v7_0_wait_pio_status(adev);
if (ret)
dev_err(adev->dev, "LSDMA PIO failed to copy memory!\n");
return ret;
}
static int lsdma_v7_0_fill_mem(struct amdgpu_device *adev,
uint64_t dst_addr,
uint32_t data,
uint64_t size)
{
int ret;
uint32_t tmp;
WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_CONSTFILL_DATA, data);
WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_DST_ADDR_LO, lower_32_bits(dst_addr));
WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_DST_ADDR_HI, upper_32_bits(dst_addr));
WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_CONTROL, 0x0);
tmp = RREG32_SOC15(LSDMA, 0, regLSDMA_PIO_COMMAND);
tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, BYTE_COUNT, size);
tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, SRC_LOCATION, 0);
tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, DST_LOCATION, 0);
tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, SRC_ADDR_INC, 0);
tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, DST_ADDR_INC, 0);
tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, OVERLAP_DISABLE, 0);
tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, CONSTANT_FILL, 1);
WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_COMMAND, tmp);
ret = lsdma_v7_0_wait_pio_status(adev);
if (ret)
dev_err(adev->dev, "LSDMA PIO failed to fill memory!\n");
return ret;
}
static void lsdma_v7_0_update_memory_power_gating(struct amdgpu_device *adev,
bool enable)
{
uint32_t tmp;
tmp = RREG32_SOC15(LSDMA, 0, regLSDMA_MEM_POWER_CTRL);
tmp = REG_SET_FIELD(tmp, LSDMA_MEM_POWER_CTRL, MEM_POWER_CTRL_EN, 0);
WREG32_SOC15(LSDMA, 0, regLSDMA_MEM_POWER_CTRL, tmp);
tmp = REG_SET_FIELD(tmp, LSDMA_MEM_POWER_CTRL, MEM_POWER_CTRL_EN, enable);
WREG32_SOC15(LSDMA, 0, regLSDMA_MEM_POWER_CTRL, tmp);
}
const struct amdgpu_lsdma_funcs lsdma_v7_0_funcs = {
.copy_mem = lsdma_v7_0_copy_mem,
.fill_mem = lsdma_v7_0_fill_mem,
.update_memory_power_gating = lsdma_v7_0_update_memory_power_gating
};
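A usage sketch, assuming adev->lsdma.funcs has been pointed at lsdma_v7_0_funcs during early init (src_mc_addr, dst_mc_addr and size are placeholder names):
/* Callers go through the amdgpu_lsdma wrappers, which validate the
 * request before invoking the hooks above. */
r = amdgpu_lsdma_copy_mem(adev, src_mc_addr, dst_mc_addr, size);
if (r)
	dev_err(adev->dev, "LSDMA PIO copy failed (%d)\n", r);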


@ -0,0 +1,31 @@
/*
* Copyright 2023 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef __LSDMA_V7_0_H__
#define __LSDMA_V7_0_H__
#include "soc15_common.h"
extern const struct amdgpu_lsdma_funcs lsdma_v7_0_funcs;
#endif /* __LSDMA_V7_0_H__ */


@ -98,6 +98,7 @@ mmhub_v3_3_print_l2_protection_fault_status(struct amdgpu_device *adev,
switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
case IP_VERSION(3, 3, 0):
case IP_VERSION(3, 3, 1):
mmhub_cid = mmhub_client_ids_v3_3[cid][rw];
break;
default:


@ -89,7 +89,9 @@ static void nbio_v7_11_vpe_doorbell_range(struct amdgpu_device *adev, int instan
bool use_doorbell, int doorbell_index,
int doorbell_size)
{
u32 reg = SOC15_REG_OFFSET(NBIO, 0, regGDC0_BIF_VPE_DOORBELL_RANGE);
u32 reg = instance == 0 ?
SOC15_REG_OFFSET(NBIO, 0, regGDC0_BIF_VPE_DOORBELL_RANGE) :
SOC15_REG_OFFSET(NBIO, 0, regGDC0_BIF_VPE1_DOORBELL_RANGE);
u32 doorbell_range = RREG32_PCIE_PORT(reg);
if (use_doorbell) {
@ -112,7 +114,10 @@ static void nbio_v7_11_vcn_doorbell_range(struct amdgpu_device *adev,
bool use_doorbell,
int doorbell_index, int instance)
{
u32 reg = SOC15_REG_OFFSET(NBIO, 0, regGDC0_BIF_VCN0_DOORBELL_RANGE);
u32 reg = instance == 0 ?
SOC15_REG_OFFSET(NBIO, 0, regGDC0_BIF_VCN0_DOORBELL_RANGE) :
SOC15_REG_OFFSET(NBIO, 0, regGDC0_BIF_VCN1_DOORBELL_RANGE);
u32 doorbell_range = RREG32_PCIE_PORT(reg);
if (use_doorbell) {


@ -296,6 +296,7 @@ enum psp_gfx_fw_type {
GFX_FW_TYPE_VPEC_FW1 = 100, /* VPEC FW1 To Save VPE */
GFX_FW_TYPE_VPEC_FW2 = 101, /* VPEC FW2 To Save VPE */
GFX_FW_TYPE_VPE = 102,
GFX_FW_TYPE_JPEG_RAM = 128, /**< JPEG Command buffer */
GFX_FW_TYPE_P2S_TABLE = 129,
GFX_FW_TYPE_MAX
};
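/* GFX_FW_TYPE_JPEG_RAM is the PSP-side half of the new JPEG DPG path:
 * the DPG SRAM image staged in jpeg_v4_0_5_start_dpg_mode() is handed
 * to the PSP under this type (the ucode plumbing lives in
 * amdgpu_jpeg.c, outside this hunk; connection inferred from this
 * series). */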


@ -506,7 +506,7 @@ static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops)
* before training, and restore it after training to avoid
* VRAM corruption.
*/
sz = GDDR6_MEM_TRAINING_ENCROACHED_SIZE;
sz = BIST_MEM_TRAINING_ENCROACHED_SIZE;
if (adev->gmc.visible_vram_size < sz || !adev->mman.aper_base_kaddr) {
DRM_ERROR("visible_vram_size %llx or aper_base_kaddr %p is not initialized.\n",


@ -53,6 +53,8 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_6_sos.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_6_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_14_0_0_toc.bin");
MODULE_FIRMWARE("amdgpu/psp_14_0_0_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_14_0_1_toc.bin");
MODULE_FIRMWARE("amdgpu/psp_14_0_1_ta.bin");
/* For large FW files the time to complete can be very long */
#define USBC_PD_POLLING_LIMIT_S 240
@ -101,6 +103,7 @@ static int psp_v13_0_init_microcode(struct psp_context *psp)
case IP_VERSION(13, 0, 8):
case IP_VERSION(13, 0, 11):
case IP_VERSION(14, 0, 0):
case IP_VERSION(14, 0, 1):
err = psp_init_toc_microcode(psp, ucode_prefix);
if (err)
return err;
@ -561,7 +564,7 @@ static int psp_v13_0_memory_training(struct psp_context *psp, uint32_t ops)
* before training, and restore it after training to avoid
* VRAM corruption.
*/
sz = GDDR6_MEM_TRAINING_ENCROACHED_SIZE;
sz = BIST_MEM_TRAINING_ENCROACHED_SIZE;
if (adev->gmc.visible_vram_size < sz || !adev->mman.aper_base_kaddr) {
dev_err(adev->dev, "visible_vram_size %llx or aper_base_kaddr %p is not initialized.\n",


@ -0,0 +1,672 @@
/*
* Copyright 2023 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <drm/drm_drv.h>
#include <linux/vmalloc.h>
#include "amdgpu.h"
#include "amdgpu_psp.h"
#include "amdgpu_ucode.h"
#include "soc15_common.h"
#include "psp_v14_0.h"
#include "mp/mp_14_0_2_offset.h"
#include "mp/mp_14_0_2_sh_mask.h"
MODULE_FIRMWARE("amdgpu/psp_14_0_2_sos.bin");
MODULE_FIRMWARE("amdgpu/psp_14_0_3_sos.bin");
/* For large FW files the time to complete can be very long */
#define USBC_PD_POLLING_LIMIT_S 240
/* Read USB-PD from LFB */
#define GFX_CMD_USB_PD_USE_LFB 0x480
/* VBIOS gfl defines */
#define MBOX_READY_MASK 0x80000000
#define MBOX_STATUS_MASK 0x0000FFFF
#define MBOX_COMMAND_MASK 0x00FF0000
#define MBOX_READY_FLAG 0x80000000
#define C2PMSG_CMD_SPI_UPDATE_ROM_IMAGE_ADDR_LO 0x2
#define C2PMSG_CMD_SPI_UPDATE_ROM_IMAGE_ADDR_HI 0x3
#define C2PMSG_CMD_SPI_UPDATE_FLASH_IMAGE 0x4
/* memory training timeout define */
#define MEM_TRAIN_SEND_MSG_TIMEOUT_US 3000000
static int psp_v14_0_init_microcode(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
char ucode_prefix[30];
int err = 0;
amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
case IP_VERSION(14, 0, 2):
case IP_VERSION(14, 0, 3):
err = psp_init_sos_microcode(psp, ucode_prefix);
if (err)
return err;
break;
default:
BUG();
}
return 0;
}
static bool psp_v14_0_is_sos_alive(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
uint32_t sol_reg;
sol_reg = RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_81);
return sol_reg != 0x0;
}
static int psp_v14_0_wait_for_bootloader(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
int ret;
int retry_loop;
for (retry_loop = 0; retry_loop < 10; retry_loop++) {
/* Wait for the bootloader to signify that it is
ready, with bit 31 of C2PMSG_35 set to 1 */
ret = psp_wait_for(psp,
SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35),
0x80000000,
0x80000000,
false);
if (ret == 0)
return 0;
}
return ret;
}
static int psp_v14_0_bootloader_load_component(struct psp_context *psp,
struct psp_bin_desc *bin_desc,
enum psp_bootloader_cmd bl_cmd)
{
int ret;
uint32_t psp_gfxdrv_command_reg = 0;
struct amdgpu_device *adev = psp->adev;
/* Check tOS sign of life register to confirm sys driver and sOS
* have already been loaded.
*/
if (psp_v14_0_is_sos_alive(psp))
return 0;
ret = psp_v14_0_wait_for_bootloader(psp);
if (ret)
return ret;
memset(psp->fw_pri_buf, 0, PSP_1_MEG);
/* Copy the requested PSP component binary to memory */
memcpy(psp->fw_pri_buf, bin_desc->start_addr, bin_desc->size_bytes);
/* Provide the component to the bootloader */
WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_36,
(uint32_t)(psp->fw_pri_mc_addr >> 20));
psp_gfxdrv_command_reg = bl_cmd;
WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_35,
psp_gfxdrv_command_reg);
ret = psp_v14_0_wait_for_bootloader(psp);
return ret;
}
static int psp_v14_0_bootloader_load_kdb(struct psp_context *psp)
{
return psp_v14_0_bootloader_load_component(psp, &psp->kdb, PSP_BL__LOAD_KEY_DATABASE);
}
static int psp_v14_0_bootloader_load_spl(struct psp_context *psp)
{
return psp_v14_0_bootloader_load_component(psp, &psp->spl, PSP_BL__LOAD_TOS_SPL_TABLE);
}
static int psp_v14_0_bootloader_load_sysdrv(struct psp_context *psp)
{
return psp_v14_0_bootloader_load_component(psp, &psp->sys, PSP_BL__LOAD_SYSDRV);
}
static int psp_v14_0_bootloader_load_soc_drv(struct psp_context *psp)
{
return psp_v14_0_bootloader_load_component(psp, &psp->soc_drv, PSP_BL__LOAD_SOCDRV);
}
static int psp_v14_0_bootloader_load_intf_drv(struct psp_context *psp)
{
return psp_v14_0_bootloader_load_component(psp, &psp->intf_drv, PSP_BL__LOAD_INTFDRV);
}
static int psp_v14_0_bootloader_load_dbg_drv(struct psp_context *psp)
{
return psp_v14_0_bootloader_load_component(psp, &psp->dbg_drv, PSP_BL__LOAD_DBGDRV);
}
static int psp_v14_0_bootloader_load_ras_drv(struct psp_context *psp)
{
return psp_v14_0_bootloader_load_component(psp, &psp->ras_drv, PSP_BL__LOAD_RASDRV);
}
static int psp_v14_0_bootloader_load_sos(struct psp_context *psp)
{
int ret;
unsigned int psp_gfxdrv_command_reg = 0;
struct amdgpu_device *adev = psp->adev;
/* Check sOS sign of life register to confirm sys driver and sOS
* have already been loaded.
*/
if (psp_v14_0_is_sos_alive(psp))
return 0;
ret = psp_v14_0_wait_for_bootloader(psp);
if (ret)
return ret;
memset(psp->fw_pri_buf, 0, PSP_1_MEG);
/* Copy Secure OS binary to PSP memory */
memcpy(psp->fw_pri_buf, psp->sos.start_addr, psp->sos.size_bytes);
/* Provide the PSP secure OS to bootloader */
WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_36,
(uint32_t)(psp->fw_pri_mc_addr >> 20));
psp_gfxdrv_command_reg = PSP_BL__LOAD_SOSDRV;
WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_35,
psp_gfxdrv_command_reg);
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_81),
RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_81),
0, true);
return ret;
}
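/* Summary of the bootloader handshake used above (register roles as
 * used in this file): the driver publishes the staged image's
 * frame-buffer address via C2PMSG_36 (addr >> 20), kicks the command
 * through C2PMSG_35, and polls bit 31 of C2PMSG_35 for completion;
 * sOS liveness is then reported through C2PMSG_81. */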
static int psp_v14_0_ring_stop(struct psp_context *psp,
enum psp_ring_type ring_type)
{
int ret = 0;
struct amdgpu_device *adev = psp->adev;
if (amdgpu_sriov_vf(adev)) {
/* Write the ring destroy command*/
WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_101,
GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING);
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
/* Wait for response flag (bit 31) */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_101),
0x80000000, 0x80000000, false);
} else {
/* Write the ring destroy command*/
WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_64,
GFX_CTRL_CMD_ID_DESTROY_RINGS);
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
/* Wait for response flag (bit 31) */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64),
0x80000000, 0x80000000, false);
}
return ret;
}
static int psp_v14_0_ring_create(struct psp_context *psp,
enum psp_ring_type ring_type)
{
int ret = 0;
unsigned int psp_ring_reg = 0;
struct psp_ring *ring = &psp->km_ring;
struct amdgpu_device *adev = psp->adev;
if (amdgpu_sriov_vf(adev)) {
ret = psp_v14_0_ring_stop(psp, ring_type);
if (ret) {
DRM_ERROR("psp_v14_0_ring_stop_sriov failed!\n");
return ret;
}
/* Write low address of the ring to C2PMSG_102 */
psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_102, psp_ring_reg);
/* Write high address of the ring to C2PMSG_103 */
psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_103, psp_ring_reg);
/* Write the ring initialization command to C2PMSG_101 */
WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_101,
GFX_CTRL_CMD_ID_INIT_GPCOM_RING);
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_101 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_101),
0x80000000, 0x8000FFFF, false);
} else {
/* Wait for sOS ready for ring creation */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64),
0x80000000, 0x80000000, false);
if (ret) {
DRM_ERROR("Failed to wait for trust OS ready for ring creation\n");
return ret;
}
/* Write low address of the ring to C2PMSG_69 */
psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_69, psp_ring_reg);
/* Write high address of the ring to C2PMSG_70 */
psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_70, psp_ring_reg);
/* Write size of ring to C2PMSG_71 */
psp_ring_reg = ring->ring_size;
WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_71, psp_ring_reg);
/* Write the ring initialization command to C2PMSG_64 */
psp_ring_reg = ring_type;
psp_ring_reg = psp_ring_reg << 16;
WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_64, psp_ring_reg);
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_64 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64),
0x80000000, 0x8000FFFF, false);
}
return ret;
}
static int psp_v14_0_ring_destroy(struct psp_context *psp,
enum psp_ring_type ring_type)
{
int ret = 0;
struct psp_ring *ring = &psp->km_ring;
struct amdgpu_device *adev = psp->adev;
ret = psp_v14_0_ring_stop(psp, ring_type);
if (ret)
DRM_ERROR("Fail to stop psp ring\n");
amdgpu_bo_free_kernel(&adev->firmware.rbuf,
&ring->ring_mem_mc_addr,
(void **)&ring->ring_mem);
return ret;
}
static uint32_t psp_v14_0_ring_get_wptr(struct psp_context *psp)
{
uint32_t data;
struct amdgpu_device *adev = psp->adev;
if (amdgpu_sriov_vf(adev))
data = RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_102);
else
data = RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_67);
return data;
}
static void psp_v14_0_ring_set_wptr(struct psp_context *psp, uint32_t value)
{
struct amdgpu_device *adev = psp->adev;
if (amdgpu_sriov_vf(adev)) {
WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_102, value);
WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_101,
GFX_CTRL_CMD_ID_CONSUME_CMD);
} else {
	WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_67, value);
}
}
static int psp_v14_0_memory_training_send_msg(struct psp_context *psp, int msg)
{
int ret;
int i;
uint32_t data_32;
int max_wait;
struct amdgpu_device *adev = psp->adev;
data_32 = (psp->mem_train_ctx.c2p_train_data_offset >> 20);
WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_36, data_32);
WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_35, msg);
max_wait = MEM_TRAIN_SEND_MSG_TIMEOUT_US / adev->usec_timeout;
for (i = 0; i < max_wait; i++) {
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35),
0x80000000, 0x80000000, false);
if (ret == 0)
break;
}
if (i < max_wait)
ret = 0;
else
ret = -ETIME;
dev_dbg(adev->dev, "training %s %s, cost %d @ %d ms\n",
(msg == PSP_BL__DRAM_SHORT_TRAIN) ? "short" : "long",
(ret == 0) ? "succeed" : "failed",
i, adev->usec_timeout/1000);
return ret;
}
static int psp_v14_0_memory_training(struct psp_context *psp, uint32_t ops)
{
struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
uint32_t *pcache = (uint32_t *)ctx->sys_cache;
struct amdgpu_device *adev = psp->adev;
uint32_t p2c_header[4];
uint32_t sz;
void *buf;
int ret, idx;
if (ctx->init == PSP_MEM_TRAIN_NOT_SUPPORT) {
dev_dbg(adev->dev, "Memory training is not supported.\n");
return 0;
} else if (ctx->init != PSP_MEM_TRAIN_INIT_SUCCESS) {
dev_err(adev->dev, "Memory training initialization failure.\n");
return -EINVAL;
}
if (psp_v14_0_is_sos_alive(psp)) {
dev_dbg(adev->dev, "SOS is alive, skip memory training.\n");
return 0;
}
amdgpu_device_vram_access(adev, ctx->p2c_train_data_offset, p2c_header, sizeof(p2c_header), false);
dev_dbg(adev->dev, "sys_cache[%08x,%08x,%08x,%08x] p2c_header[%08x,%08x,%08x,%08x]\n",
pcache[0], pcache[1], pcache[2], pcache[3],
p2c_header[0], p2c_header[1], p2c_header[2], p2c_header[3]);
if (ops & PSP_MEM_TRAIN_SEND_SHORT_MSG) {
dev_dbg(adev->dev, "Short training depends on restore.\n");
ops |= PSP_MEM_TRAIN_RESTORE;
}
if ((ops & PSP_MEM_TRAIN_RESTORE) &&
pcache[0] != MEM_TRAIN_SYSTEM_SIGNATURE) {
dev_dbg(adev->dev, "sys_cache[0] is invalid, restore depends on save.\n");
ops |= PSP_MEM_TRAIN_SAVE;
}
if (p2c_header[0] == MEM_TRAIN_SYSTEM_SIGNATURE &&
!(pcache[0] == MEM_TRAIN_SYSTEM_SIGNATURE &&
pcache[3] == p2c_header[3])) {
dev_dbg(adev->dev, "sys_cache is invalid or out-of-date, need save training data to sys_cache.\n");
ops |= PSP_MEM_TRAIN_SAVE;
}
if ((ops & PSP_MEM_TRAIN_SAVE) &&
p2c_header[0] != MEM_TRAIN_SYSTEM_SIGNATURE) {
dev_dbg(adev->dev, "p2c_header[0] is invalid, save depends on long training.\n");
ops |= PSP_MEM_TRAIN_SEND_LONG_MSG;
}
if (ops & PSP_MEM_TRAIN_SEND_LONG_MSG) {
ops &= ~PSP_MEM_TRAIN_SEND_SHORT_MSG;
ops |= PSP_MEM_TRAIN_SAVE;
}
dev_dbg(adev->dev, "Memory training ops:%x.\n", ops);
if (ops & PSP_MEM_TRAIN_SEND_LONG_MSG) {
/*
* Long training will encroach a certain amount on the bottom of VRAM;
* save the content from the bottom of VRAM to system memory
* before training, and restore it after training to avoid
* VRAM corruption.
*/
sz = BIST_MEM_TRAINING_ENCROACHED_SIZE;
if (adev->gmc.visible_vram_size < sz || !adev->mman.aper_base_kaddr) {
dev_err(adev->dev, "visible_vram_size %llx or aper_base_kaddr %p is not initialized.\n",
adev->gmc.visible_vram_size,
adev->mman.aper_base_kaddr);
return -EINVAL;
}
buf = vmalloc(sz);
if (!buf) {
dev_err(adev->dev, "failed to allocate system memory.\n");
return -ENOMEM;
}
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
memcpy_fromio(buf, adev->mman.aper_base_kaddr, sz);
ret = psp_v14_0_memory_training_send_msg(psp, PSP_BL__DRAM_LONG_TRAIN);
if (ret) {
DRM_ERROR("Send long training msg failed.\n");
vfree(buf);
drm_dev_exit(idx);
return ret;
}
memcpy_toio(adev->mman.aper_base_kaddr, buf, sz);
adev->hdp.funcs->flush_hdp(adev, NULL);
vfree(buf);
drm_dev_exit(idx);
} else {
vfree(buf);
return -ENODEV;
}
}
if (ops & PSP_MEM_TRAIN_SAVE) {
amdgpu_device_vram_access(psp->adev, ctx->p2c_train_data_offset, ctx->sys_cache, ctx->train_data_size, false);
}
if (ops & PSP_MEM_TRAIN_RESTORE) {
amdgpu_device_vram_access(psp->adev, ctx->c2p_train_data_offset, ctx->sys_cache, ctx->train_data_size, true);
}
if (ops & PSP_MEM_TRAIN_SEND_SHORT_MSG) {
ret = psp_v14_0_memory_training_send_msg(psp, (amdgpu_force_long_training > 0) ?
PSP_BL__DRAM_LONG_TRAIN : PSP_BL__DRAM_SHORT_TRAIN);
if (ret) {
dev_err(adev->dev, "send training msg failed.\n");
return ret;
}
}
ctx->training_cnt++;
return 0;
}
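The flag resolution above forms a dependency chain: a short-train request needs a restore, a restore needs a valid sys_cache (else save first), a save needs a valid p2c header (else long-train first), and a long train supersedes a short one. A stand-alone sketch of that chain, with the signature/staleness checks reduced to two illustrative booleans and made-up flag names:

#include <stdint.h>
#include <stdbool.h>

#define OP_SEND_SHORT 0x1
#define OP_RESTORE    0x2
#define OP_SAVE       0x4
#define OP_SEND_LONG  0x8

static uint32_t resolve_train_ops(uint32_t ops, bool sys_cache_valid,
				  bool p2c_header_valid)
{
	if (ops & OP_SEND_SHORT)	/* short training restores prior data */
		ops |= OP_RESTORE;
	if ((ops & OP_RESTORE) && !sys_cache_valid)
		ops |= OP_SAVE;		/* nothing valid to restore yet */
	if ((ops & OP_SAVE) && !p2c_header_valid)
		ops |= OP_SEND_LONG;	/* nothing valid to save yet */
	if (ops & OP_SEND_LONG) {	/* long training replaces short */
		ops &= ~OP_SEND_SHORT;
		ops |= OP_SAVE;
	}
	return ops;
}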
static int psp_v14_0_load_usbc_pd_fw(struct psp_context *psp, uint64_t fw_pri_mc_addr)
{
struct amdgpu_device *adev = psp->adev;
uint32_t reg_status;
int ret, i = 0;
/*
* The LFB address is aligned to a 1MB boundary and has to be
* right-shifted by 20 bits so that it can be passed in a 32-bit C2P
* register
*/
WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_36, (fw_pri_mc_addr >> 20));
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35),
0x80000000, 0x80000000, false);
if (ret)
return ret;
/* Fire up the interrupt so the PSP can pick up the address */
WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_35, (GFX_CMD_USB_PD_USE_LFB << 16));
/* FW load takes a very long time */
do {
msleep(1000);
reg_status = RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_35);
if (reg_status & 0x80000000)
goto done;
} while (++i < USBC_PD_POLLING_LIMIT_S);
return -ETIME;
done:
if ((reg_status & 0xFFFF) != 0) {
DRM_ERROR("Address load failed - MP0_SMN_C2PMSG_35.Bits [15:0] = %04x\n",
reg_status & 0xFFFF);
return -EIO;
}
return 0;
}
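A quick stand-alone check of the address packing above: only because the LFB address is 1MB-aligned does the 20-bit right shift fit losslessly into a 32-bit C2P register. The address value here is made up for illustration:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t fw_pri_mc_addr = 0x0000000123400000ULL;	/* 1MB aligned */
	uint32_t c2p_val = (uint32_t)(fw_pri_mc_addr >> 20);

	assert((fw_pri_mc_addr & ((1ULL << 20) - 1)) == 0);	/* alignment */
	assert(((uint64_t)c2p_val << 20) == fw_pri_mc_addr);	/* round trip */
	return 0;
}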
static int psp_v14_0_read_usbc_pd_fw(struct psp_context *psp, uint32_t *fw_ver)
{
struct amdgpu_device *adev = psp->adev;
int ret;
WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_35, C2PMSG_CMD_GFX_USB_PD_FW_VER);
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35),
0x80000000, 0x80000000, false);
if (!ret)
*fw_ver = RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_36);
return ret;
}
static int psp_v14_0_exec_spi_cmd(struct psp_context *psp, int cmd)
{
uint32_t reg_status = 0, reg_val = 0;
struct amdgpu_device *adev = psp->adev;
int ret;
/* clear MBX ready (MBOX_READY_MASK bit is 0) and set update command */
reg_val |= (cmd << 16);
WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_115, reg_val);
/* Ring the doorbell */
WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_73, 1);
if (cmd == C2PMSG_CMD_SPI_UPDATE_FLASH_IMAGE)
ret = psp_wait_for_spirom_update(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115),
MBOX_READY_FLAG, MBOX_READY_MASK, PSP_SPIROM_UPDATE_TIMEOUT);
else
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115),
MBOX_READY_FLAG, MBOX_READY_MASK, false);
if (ret) {
dev_err(adev->dev, "SPI cmd %x timed out, ret = %d", cmd, ret);
return ret;
}
reg_status = RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_115);
if ((reg_status & 0xFFFF) != 0) {
dev_err(adev->dev, "SPI cmd %x failed, fail status = %04x\n",
cmd, reg_status & 0xFFFF);
return -EIO;
}
return 0;
}
static int psp_v14_0_update_spirom(struct psp_context *psp,
uint64_t fw_pri_mc_addr)
{
struct amdgpu_device *adev = psp->adev;
int ret;
/* Confirm PSP is ready to start */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115),
MBOX_READY_FLAG, MBOX_READY_MASK, false);
if (ret) {
dev_err(adev->dev, "PSP Not ready to start processing, ret = %d", ret);
return ret;
}
WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_116, lower_32_bits(fw_pri_mc_addr));
ret = psp_v14_0_exec_spi_cmd(psp, C2PMSG_CMD_SPI_UPDATE_ROM_IMAGE_ADDR_LO);
if (ret)
return ret;
WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_116, upper_32_bits(fw_pri_mc_addr));
ret = psp_v14_0_exec_spi_cmd(psp, C2PMSG_CMD_SPI_UPDATE_ROM_IMAGE_ADDR_HI);
if (ret)
return ret;
psp->vbflash_done = true;
ret = psp_v14_0_exec_spi_cmd(psp, C2PMSG_CMD_SPI_UPDATE_FLASH_IMAGE);
if (ret)
return ret;
return 0;
}
static int psp_v14_0_vbflash_status(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
return RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_115);
}
static const struct psp_funcs psp_v14_0_funcs = {
.init_microcode = psp_v14_0_init_microcode,
.bootloader_load_kdb = psp_v14_0_bootloader_load_kdb,
.bootloader_load_spl = psp_v14_0_bootloader_load_spl,
.bootloader_load_sysdrv = psp_v14_0_bootloader_load_sysdrv,
.bootloader_load_soc_drv = psp_v14_0_bootloader_load_soc_drv,
.bootloader_load_intf_drv = psp_v14_0_bootloader_load_intf_drv,
.bootloader_load_dbg_drv = psp_v14_0_bootloader_load_dbg_drv,
.bootloader_load_ras_drv = psp_v14_0_bootloader_load_ras_drv,
.bootloader_load_sos = psp_v14_0_bootloader_load_sos,
.ring_create = psp_v14_0_ring_create,
.ring_stop = psp_v14_0_ring_stop,
.ring_destroy = psp_v14_0_ring_destroy,
.ring_get_wptr = psp_v14_0_ring_get_wptr,
.ring_set_wptr = psp_v14_0_ring_set_wptr,
.mem_training = psp_v14_0_memory_training,
.load_usbc_pd_fw = psp_v14_0_load_usbc_pd_fw,
.read_usbc_pd_fw = psp_v14_0_read_usbc_pd_fw,
.update_spirom = psp_v14_0_update_spirom,
.vbflash_stat = psp_v14_0_vbflash_status
};
void psp_v14_0_set_psp_funcs(struct psp_context *psp)
{
psp->funcs = &psp_v14_0_funcs;
}
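psp_v14_0_funcs follows the driver's usual ops-table pattern: the IP-independent PSP core dispatches through psp->funcs, so each PSP generation only fills in its own callbacks. A minimal illustration of that dispatch; the types and the -ENOSYS fallback are illustrative, not taken from amdgpu_psp.c:

#include <errno.h>

struct demo_psp_funcs {
	int (*ring_create)(void *psp, int ring_type);
};

static int demo_ring_create(void *psp, const struct demo_psp_funcs *funcs,
			    int ring_type)
{
	if (!funcs->ring_create)
		return -ENOSYS;		/* generation doesn't implement it */
	return funcs->ring_create(psp, ring_type);
}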

View File

@ -0,0 +1,32 @@
/*
* Copyright 2023 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef __PSP_V14_0_H__
#define __PSP_V14_0_H__
#include "amdgpu_psp.h"
#define PSP_SPIROM_UPDATE_TIMEOUT 60000 /* 60s */
void psp_v14_0_set_psp_funcs(struct psp_context *psp);
#endif

View File

@ -49,6 +49,7 @@ MODULE_FIRMWARE("amdgpu/sdma_6_0_1.bin");
MODULE_FIRMWARE("amdgpu/sdma_6_0_2.bin");
MODULE_FIRMWARE("amdgpu/sdma_6_0_3.bin");
MODULE_FIRMWARE("amdgpu/sdma_6_1_0.bin");
MODULE_FIRMWARE("amdgpu/sdma_6_1_1.bin");
#define SDMA1_REG_OFFSET 0x600
#define SDMA0_HYP_DEC_REG_START 0x5880

View File

@ -50,13 +50,13 @@ static const struct amd_ip_funcs soc21_common_ip_funcs;
/* SOC21 */
static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn0[] = {
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};
static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn1[] = {
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
};
static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode_vcn0 = {
@ -711,6 +711,7 @@ static int soc21_common_early_init(void *handle)
AMD_CG_SUPPORT_BIF_MGCG |
AMD_CG_SUPPORT_BIF_LS;
adev->pg_flags = AMD_PG_SUPPORT_VCN_DPG |
AMD_PG_SUPPORT_JPEG_DPG |
AMD_PG_SUPPORT_VCN |
AMD_PG_SUPPORT_JPEG |
AMD_PG_SUPPORT_GFX_PG;
@ -865,6 +866,7 @@ static int soc21_common_set_clockgating_state(void *handle,
case IP_VERSION(7, 7, 0):
case IP_VERSION(7, 7, 1):
case IP_VERSION(7, 11, 0):
case IP_VERSION(7, 11, 1):
adev->nbio.funcs->update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
adev->nbio.funcs->update_medium_grain_light_sleep(adev,

File diff suppressed because it is too large

View File

@ -0,0 +1,37 @@
/*
* Copyright 2023 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef __VCN_V5_0_0_H__
#define __VCN_V5_0_0_H__
#define VCN_VID_SOC_ADDRESS 0x1FC00
#define VCN_AON_SOC_ADDRESS 0x1F800
#define VCN1_VID_SOC_ADDRESS 0x48300
#define VCN1_AON_SOC_ADDRESS 0x48000
#define VCN_VID_IP_ADDRESS 0x0
#define VCN_AON_IP_ADDRESS 0x30000
extern const struct amdgpu_ip_block_version vcn_v5_0_0_ip_block;
#endif /* __VCN_V5_0_0_H__ */

View File

@ -2518,7 +2518,7 @@ static const uint32_t cwsr_trap_gfx11_hex[] = {
0x8b6eff7b, 0x00000400,
0xbfa20045, 0xbf830010,
0xb8fbf803, 0xbfa0fffa,
0x8b6eff7b, 0x00000900,
0x8b6eff7b, 0x00160900,
0xbfa20015, 0x8b6eff7b,
0x000071ff, 0xbfa10008,
0x8b6fff7b, 0x00007080,

View File

@ -81,6 +81,11 @@ var SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT = 11
var SQ_WAVE_TRAPSTS_POST_SAVECTX_SIZE = 21
var SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK = 0x800
var SQ_WAVE_TRAPSTS_EXCP_HI_MASK = 0x7000
#if ASIC_FAMILY >= CHIP_PLUM_BONITO
var SQ_WAVE_TRAPSTS_WAVE_START_MASK = 0x20000
var SQ_WAVE_TRAPSTS_WAVE_END_MASK = 0x40000
var SQ_WAVE_TRAPSTS_TRAP_AFTER_INST_MASK = 0x100000
#endif
var SQ_WAVE_MODE_EXCP_EN_SHIFT = 12
var SQ_WAVE_MODE_EXCP_EN_ADDR_WATCH_SHIFT = 19
@ -92,6 +97,16 @@ var SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK = 0x003F8000
var SQ_WAVE_MODE_DEBUG_EN_MASK = 0x800
#if ASIC_FAMILY < CHIP_PLUM_BONITO
var S_TRAPSTS_NON_MASKABLE_EXCP_MASK = SQ_WAVE_TRAPSTS_MEM_VIOL_MASK|SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK
#else
var S_TRAPSTS_NON_MASKABLE_EXCP_MASK = SQ_WAVE_TRAPSTS_MEM_VIOL_MASK |\
SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK |\
SQ_WAVE_TRAPSTS_WAVE_START_MASK |\
SQ_WAVE_TRAPSTS_WAVE_END_MASK |\
SQ_WAVE_TRAPSTS_TRAP_AFTER_INST_MASK
#endif
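The new masks line up with the trap-handler hex change shown in the previous file: ORing them into the old non-maskable mask (mem_viol | illegal_inst = 0x900, which assumes MEM_VIOL is 0x100 given ILLEGAL_INST is 0x800) yields exactly the new 0x00160900 literal. A compile-time cross-check of that arithmetic:

/* 0x900 (mem_viol | illegal_inst) | wave_start | wave_end | trap_after_inst */
_Static_assert((0x00000900u | 0x20000u | 0x40000u | 0x100000u) == 0x00160900u,
	       "non-maskable exception mask matches the new handler word");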
// bits [31:24] unused by SPI debug data
var TTMP11_SAVE_REPLAY_W64H_SHIFT = 31
var TTMP11_SAVE_REPLAY_W64H_MASK = 0x80000000
@ -224,7 +239,7 @@ L_NOT_HALTED:
// Check non-maskable exceptions. memory_violation, illegal_instruction
// and xnack_error exceptions always cause the wave to enter the trap
// handler.
s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_MEM_VIOL_MASK|SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK
s_and_b32 ttmp2, s_save_trapsts, S_TRAPSTS_NON_MASKABLE_EXCP_MASK
s_cbranch_scc1 L_FETCH_2ND_TRAP
// Check for maskable exceptions in trapsts.excp and trapsts.excp_hi.

View File

@ -36,6 +36,7 @@
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/processor.h>
#include "amdgpu_vm.h"
/*
* The primary memory I/O features being added for revisions of gfxip
@ -326,10 +327,16 @@ static void kfd_init_apertures_vi(struct kfd_process_device *pdd, uint8_t id)
* with small reserved space for kernel.
* Set them to CANONICAL addresses.
*/
pdd->gpuvm_base = SVM_USER_BASE;
pdd->gpuvm_base = max(SVM_USER_BASE, AMDGPU_VA_RESERVED_BOTTOM);
pdd->gpuvm_limit =
pdd->dev->kfd->shared_resources.gpuvm_size - 1;
/* dGPUs: the reserved space for kernel
* before SVM
*/
pdd->qpd.cwsr_base = SVM_CWSR_BASE;
pdd->qpd.ib_base = SVM_IB_BASE;
pdd->scratch_base = MAKE_SCRATCH_APP_BASE_VI();
pdd->scratch_limit = MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base);
}
@ -339,18 +346,18 @@ static void kfd_init_apertures_v9(struct kfd_process_device *pdd, uint8_t id)
pdd->lds_base = MAKE_LDS_APP_BASE_V9();
pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base);
/* Raven needs SVM to support graphic handle, etc. Leave the small
* reserved space before SVM on Raven as well, even though we don't
* have to.
* Set gpuvm_base and gpuvm_limit to CANONICAL addresses so that they
* are used in Thunk to reserve SVM.
*/
pdd->gpuvm_base = SVM_USER_BASE;
pdd->gpuvm_base = AMDGPU_VA_RESERVED_BOTTOM;
pdd->gpuvm_limit =
pdd->dev->kfd->shared_resources.gpuvm_size - 1;
pdd->scratch_base = MAKE_SCRATCH_APP_BASE_V9();
pdd->scratch_limit = MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base);
/*
* Place TBA/TMA on opposite side of VM hole to prevent
* stray faults from triggering SVM on these pages.
*/
pdd->qpd.cwsr_base = AMDGPU_VA_RESERVED_TRAP_START(pdd->dev->adev);
}
int kfd_init_apertures(struct kfd_process *process)
@ -407,12 +414,6 @@ int kfd_init_apertures(struct kfd_process *process)
return -EINVAL;
}
}
/* dGPUs: the reserved space for kernel
* before SVM
*/
pdd->qpd.cwsr_base = SVM_CWSR_BASE;
pdd->qpd.ib_base = SVM_IB_BASE;
}
dev_dbg(kfd_device, "node id %u\n", id);

View File

@ -128,6 +128,31 @@ struct mqd_manager {
uint32_t mqd_size;
};
struct mqd_user_context_save_area_header {
/* Byte offset from start of user context
* save area to the last saved top (lowest
* address) of control stack data. Must be
* 4 byte aligned.
*/
uint32_t control_stack_offset;
/* Byte size of the last saved control stack
* data. Must be 4 byte aligned.
*/
uint32_t control_stack_size;
/* Byte offset from start of user context save
* area to the last saved base (lowest address)
* of wave state data. Must be 4 byte aligned.
*/
uint32_t wave_state_offset;
/* Byte size of the last saved wave state data.
* Must be 4 byte aligned.
*/
uint32_t wave_state_size;
};
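Given that header, a consumer (for example a debugger reading a user context save area) can locate both blobs with plain offset arithmetic. A hedged sketch, assuming the header above is in scope and sits at the start of the save area:

#include <stdint.h>

struct save_area_view {
	const void *ctl_stack;	/* last saved control stack data */
	uint32_t ctl_stack_size;
	const void *wave_state;	/* last saved wave state data */
	uint32_t wave_state_size;
};

static struct save_area_view
view_save_area(const uint8_t *area,
	       const struct mqd_user_context_save_area_header *h)
{
	struct save_area_view v = {
		.ctl_stack	 = area + h->control_stack_offset,
		.ctl_stack_size	 = h->control_stack_size,
		.wave_state	 = area + h->wave_state_offset,
		.wave_state_size = h->wave_state_size,
	};
	return v;
}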
struct kfd_mem_obj *allocate_hiq_mqd(struct kfd_node *dev,
struct queue_properties *q);

View File

@ -55,8 +55,8 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
m = get_mqd(mqd);
if (has_wa_flag) {
uint32_t wa_mask = minfo->update_flag == UPDATE_FLAG_DBG_WA_ENABLE ?
0xffff : 0xffffffff;
uint32_t wa_mask =
(minfo->update_flag & UPDATE_FLAG_DBG_WA_ENABLE) ? 0xffff : 0xffffffff;
m->compute_static_thread_mgmt_se0 = wa_mask;
m->compute_static_thread_mgmt_se1 = wa_mask;

View File

@ -303,6 +303,15 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
update_cu_mask(mm, mqd, minfo, 0);
set_priority(m, q);
if (minfo && KFD_GC_VERSION(mm->dev) >= IP_VERSION(9, 4, 2)) {
if (minfo->update_flag & UPDATE_FLAG_IS_GWS)
m->compute_resource_limits |=
COMPUTE_RESOURCE_LIMITS__FORCE_SIMD_DIST_MASK;
else
m->compute_resource_limits &=
~COMPUTE_RESOURCE_LIMITS__FORCE_SIMD_DIST_MASK;
}
q->is_active = QUEUE_IS_ACTIVE(*q);
}

View File

@ -532,6 +532,7 @@ struct queue_properties {
enum mqd_update_flag {
UPDATE_FLAG_DBG_WA_ENABLE = 1,
UPDATE_FLAG_DBG_WA_DISABLE = 2,
UPDATE_FLAG_IS_GWS = 4, /* quirk for gfx9 IP */
};
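The enum values are distinct powers of two, so update_flag is a bitmask: callers OR flags together and consumers test each bit independently, as update_mqd() and pqm_set_gws() do elsewhere in this series. A hypothetical helper for illustration, assuming the enum above is in scope:

#include <stdbool.h>

static bool demo_wants_gws(const struct mqd_update_info *minfo)
{
	/* flags can be combined, e.g. UPDATE_FLAG_DBG_WA_ENABLE | UPDATE_FLAG_IS_GWS */
	return (minfo->update_flag & UPDATE_FLAG_IS_GWS) != 0;
}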
struct mqd_update_info {

View File

@ -95,6 +95,7 @@ void kfd_process_dequeue_from_device(struct kfd_process_device *pdd)
int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
void *gws)
{
struct mqd_update_info minfo = {0};
struct kfd_node *dev = NULL;
struct process_queue_node *pqn;
struct kfd_process_device *pdd;
@ -146,9 +147,10 @@ int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
}
pdd->qpd.num_gws = gws ? dev->adev->gds.gws_size : 0;
minfo.update_flag = gws ? UPDATE_FLAG_IS_GWS : 0;
return pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
pqn->q, NULL);
pqn->q, &minfo);
}
void kfd_process_dequeue_from_all_devices(struct kfd_process *p)

View File

@ -1640,12 +1640,10 @@ static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext,
else
mode = UNKNOWN_MEMORY_PARTITION_MODE;
if (pcache->cache_level == 2)
pcache->cache_size = pcache_info[cache_type].cache_size * num_xcc;
else if (mode)
pcache->cache_size = pcache_info[cache_type].cache_size / mode;
else
pcache->cache_size = pcache_info[cache_type].cache_size;
pcache->cache_size = pcache_info[cache_type].cache_size;
/* Partition mode only affects L3 cache size */
if (mode && pcache->cache_level == 3)
pcache->cache_size /= mode;
if (pcache_info[cache_type].flags & CRAT_CACHE_FLAGS_DATA_CACHE)
pcache->cache_type |= HSA_CACHE_TYPE_DATA;
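The corrected rule above applies the partition divisor to L3 only. Restated stand-alone with illustrative numbers: an 8192 KB L3 under a 4-way partition mode reports 2048 KB, while L2 is left whole:

#include <stdint.h>

static uint32_t reported_cache_kb(uint32_t size_kb, uint32_t level,
				  uint32_t mode)
{
	if (mode && level == 3)
		size_kb /= mode;	/* e.g. 8192 / 4 == 2048 */
	return size_kb;
}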

View File

@ -1939,17 +1939,15 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
adev->dm.hdcp_workqueue = NULL;
}
if (adev->dm.dc)
if (adev->dm.dc) {
dc_deinit_callbacks(adev->dm.dc);
if (adev->dm.dc)
dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
if (dc_enable_dmub_notifications(adev->dm.dc)) {
kfree(adev->dm.dmub_notify);
adev->dm.dmub_notify = NULL;
destroy_workqueue(adev->dm.delayed_hpd_wq);
adev->dm.delayed_hpd_wq = NULL;
if (dc_enable_dmub_notifications(adev->dm.dc)) {
kfree(adev->dm.dmub_notify);
adev->dm.dmub_notify = NULL;
destroy_workqueue(adev->dm.delayed_hpd_wq);
adev->dm.delayed_hpd_wq = NULL;
}
}
if (adev->dm.dmub_bo)
@ -1957,7 +1955,7 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
&adev->dm.dmub_bo_gpu_addr,
&adev->dm.dmub_bo_cpu_addr);
if (adev->dm.hpd_rx_offload_wq) {
if (adev->dm.hpd_rx_offload_wq && adev->dm.dc) {
for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
if (adev->dm.hpd_rx_offload_wq[i].wq) {
destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
@ -2130,7 +2128,8 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_4_MAILBOX
DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_5_TRACEBUFF
DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_6_FW_STATE
DMUB_WINDOW_MEMORY_TYPE_FB //DMUB_WINDOW_7_SCRATCH_MEM
DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_7_SCRATCH_MEM
DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_SHARED_STATE
};
int r;
@ -5254,6 +5253,7 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
struct drm_plane_state *new_plane_state,
struct drm_crtc_state *crtc_state,
struct dc_flip_addrs *flip_addrs,
bool is_psr_su,
bool *dirty_regions_changed)
{
struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
@ -5278,6 +5278,10 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
num_clips = drm_plane_get_damage_clips_count(new_plane_state);
clips = drm_plane_get_damage_clips(new_plane_state);
if (num_clips && (!amdgpu_damage_clips || (amdgpu_damage_clips < 0 &&
is_psr_su)))
goto ffu;
if (!dm_crtc_state->mpo_requested) {
if (!num_clips || num_clips > DC_MAX_DIRTY_RECTS)
goto ffu;
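The condition above encodes the assumed semantics of the new amdgpu_damage_clips module parameter: 0 forces full-frame updates, a negative value means auto (skip damage clips only for PSR-SU panels), and a positive value always honors the clips. Restated as a hedged helper:

#include <stdbool.h>

static bool demo_use_damage_clips(int amdgpu_damage_clips, bool is_psr_su)
{
	if (amdgpu_damage_clips == 0)
		return false;			/* forced off */
	if (amdgpu_damage_clips < 0 && is_psr_su)
		return false;			/* auto: off for PSR-SU */
	return true;				/* honor the damage clips */
}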
@ -6229,7 +6233,9 @@ create_stream_for_sink(struct drm_connector *connector,
if (recalculate_timing) {
freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
drm_mode_copy(&saved_mode, &mode);
saved_mode.picture_aspect_ratio = mode.picture_aspect_ratio;
drm_mode_copy(&mode, freesync_mode);
mode.picture_aspect_ratio = saved_mode.picture_aspect_ratio;
} else {
decide_crtc_timing_for_drm_display_mode(
&mode, preferred_mode, scale);
@ -6509,7 +6515,8 @@ static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
{
struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP &&
amdgpu_dm_abm_level < 0)
sysfs_remove_group(&connector->kdev->kobj, &amdgpu_group);
drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
@ -6573,9 +6580,12 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
state->vcpi_slots = 0;
state->pbn = 0;
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
state->abm_level = amdgpu_dm_abm_level ?:
ABM_LEVEL_IMMEDIATE_DISABLE;
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
if (amdgpu_dm_abm_level <= 0)
state->abm_level = ABM_LEVEL_IMMEDIATE_DISABLE;
else
state->abm_level = amdgpu_dm_abm_level;
}
__drm_atomic_helper_connector_reset(connector, &state->base);
}
@ -6613,7 +6623,8 @@ amdgpu_dm_connector_late_register(struct drm_connector *connector)
to_amdgpu_dm_connector(connector);
int r;
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP &&
amdgpu_dm_abm_level < 0) {
r = sysfs_create_group(&connector->kdev->kobj,
&amdgpu_group);
if (r)
@ -7643,7 +7654,8 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
if (connector_type == DRM_MODE_CONNECTOR_eDP &&
(dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
(dc_is_dmcu_initialized(adev->dm.dc) ||
adev->dm.dc->ctx->dmub_srv) && amdgpu_dm_abm_level < 0) {
drm_object_attach_property(&aconnector->base.base,
adev->mode_info.abm_level_property, 0);
}
@ -8411,6 +8423,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
fill_dc_dirty_rects(plane, old_plane_state,
new_plane_state, new_crtc_state,
&bundle->flip_addrs[planes_count],
acrtc_state->stream->link->psr_settings.psr_version ==
DC_PSR_VERSION_SU_1,
&dirty_rects_changed);
/*

View File

@ -94,7 +94,7 @@ static void calculate_bandwidth(
const uint32_t s_high = 7;
const uint32_t dmif_chunk_buff_margin = 1;
uint32_t max_chunks_fbc_mode;
uint32_t max_chunks_fbc_mode = 0;
int32_t num_cursor_lines;
int32_t i, j, k;

View File

@ -1850,19 +1850,21 @@ static enum bp_result get_firmware_info_v3_2(
/* Vega12 */
smu_info_v3_2 = GET_IMAGE(struct atom_smu_info_v3_2,
DATA_TABLES(smu_info));
DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_2->gpuclk_ss_percentage);
if (!smu_info_v3_2)
return BP_RESULT_BADBIOSTABLE;
DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_2->gpuclk_ss_percentage);
info->default_engine_clk = smu_info_v3_2->bootup_dcefclk_10khz * 10;
} else if (revision.minor == 3) {
/* Vega20 */
smu_info_v3_3 = GET_IMAGE(struct atom_smu_info_v3_3,
DATA_TABLES(smu_info));
DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_3->gpuclk_ss_percentage);
if (!smu_info_v3_3)
return BP_RESULT_BADBIOSTABLE;
DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_3->gpuclk_ss_percentage);
info->default_engine_clk = smu_info_v3_3->bootup_dcefclk_10khz * 10;
}
@ -2422,10 +2424,11 @@ static enum bp_result get_integrated_info_v11(
info_v11 = GET_IMAGE(struct atom_integrated_system_info_v1_11,
DATA_TABLES(integratedsysteminfo));
DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v11->gpuclk_ss_percentage);
if (info_v11 == NULL)
return BP_RESULT_BADBIOSTABLE;
DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v11->gpuclk_ss_percentage);
info->gpu_cap_info =
le32_to_cpu(info_v11->gpucapinfo);
/*
@ -2637,11 +2640,12 @@ static enum bp_result get_integrated_info_v2_1(
info_v2_1 = GET_IMAGE(struct atom_integrated_system_info_v2_1,
DATA_TABLES(integratedsysteminfo));
DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_1->gpuclk_ss_percentage);
if (info_v2_1 == NULL)
return BP_RESULT_BADBIOSTABLE;
DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_1->gpuclk_ss_percentage);
info->gpu_cap_info =
le32_to_cpu(info_v2_1->gpucapinfo);
/*
@ -2799,11 +2803,11 @@ static enum bp_result get_integrated_info_v2_2(
info_v2_2 = GET_IMAGE(struct atom_integrated_system_info_v2_2,
DATA_TABLES(integratedsysteminfo));
DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_2->gpuclk_ss_percentage);
if (info_v2_2 == NULL)
return BP_RESULT_BADBIOSTABLE;
DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_2->gpuclk_ss_percentage);
info->gpu_cap_info =
le32_to_cpu(info_v2_2->gpucapinfo);
/*
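All four hunks above fix the same defect: the table pointer returned by GET_IMAGE was logged (dereferenced) before the NULL check. The corrected shape in miniature, with an illustrative struct:

#include <stddef.h>

struct demo_smu_info { int gpuclk_ss_percentage; };

static int demo_read_ss(const struct demo_smu_info *tbl)
{
	if (tbl == NULL)
		return -1;			/* bad BIOS table: bail first */
	return tbl->gpuclk_ss_percentage;	/* only now dereference */
}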

View File

@ -340,7 +340,6 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
dcn32_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
return &clk_mgr->base;
break;
}
case AMDGPU_FAMILY_GC_11_0_1: {

View File

@ -546,6 +546,8 @@ static unsigned int find_dcfclk_for_voltage(const struct vg_dpm_clocks *clock_ta
int i;
for (i = 0; i < VG_NUM_SOC_VOLTAGE_LEVELS; i++) {
if (i >= VG_NUM_DCFCLK_DPM_LEVELS)
break;
if (clock_table->SocVoltage[i] == voltage)
return clock_table->DcfClocks[i];
}
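The added break guards against the two firmware tables having different lengths: the loop bound comes from the voltage table, the break from the (possibly shorter) DCF clock table. The pattern in isolation, with illustrative signatures:

static int demo_lookup(const unsigned int *volt, int n_volt,
		       const unsigned int *dcfclk, int n_dcfclk,
		       unsigned int voltage)
{
	int i;

	for (i = 0; i < n_volt; i++) {
		if (i >= n_dcfclk)
			break;			/* never index past dcfclk[] */
		if (volt[i] == voltage)
			return dcfclk[i];
	}
	return 0;
}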

View File

@ -414,7 +414,6 @@ static void init_clk_states(struct clk_mgr *clk_mgr)
uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz;
memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
clk_mgr->clks.dtbclk_en = true;
clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk; // restore ref_dtbclk
clk_mgr->clks.p_state_change_support = true;
clk_mgr->clks.prev_p_state_change_support = true;
@ -659,10 +658,13 @@ static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk
struct clk_limit_table_entry def_max = bw_params->clk_table.entries[bw_params->clk_table.num_entries - 1];
uint32_t max_fclk = 0, min_pstate = 0, max_dispclk = 0, max_dppclk = 0;
uint32_t max_pstate = 0, max_dram_speed_mts = 0, min_dram_speed_mts = 0;
uint32_t num_memps, num_fclk, num_dcfclk;
int i;
/* Determine min/max p-state values. */
for (i = 0; i < clock_table->NumMemPstatesEnabled; i++) {
num_memps = (clock_table->NumMemPstatesEnabled > NUM_MEM_PSTATE_LEVELS) ? NUM_MEM_PSTATE_LEVELS :
clock_table->NumMemPstatesEnabled;
for (i = 0; i < num_memps; i++) {
uint32_t dram_speed_mts = calc_dram_speed_mts(&clock_table->MemPstateTable[i]);
if (is_valid_clock_value(dram_speed_mts) && dram_speed_mts > max_dram_speed_mts) {
@ -674,7 +676,7 @@ static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk
min_dram_speed_mts = max_dram_speed_mts;
min_pstate = max_pstate;
for (i = 0; i < clock_table->NumMemPstatesEnabled; i++) {
for (i = 0; i < num_memps; i++) {
uint32_t dram_speed_mts = calc_dram_speed_mts(&clock_table->MemPstateTable[i]);
if (is_valid_clock_value(dram_speed_mts) && dram_speed_mts < min_dram_speed_mts) {
@ -703,9 +705,13 @@ static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk
/* Base the clock table on dcfclk, need at least one entry regardless of pmfw table */
ASSERT(clock_table->NumDcfClkLevelsEnabled > 0);
max_fclk = find_max_clk_value(clock_table->FclkClocks_Freq, clock_table->NumFclkLevelsEnabled);
num_fclk = (clock_table->NumFclkLevelsEnabled > NUM_FCLK_DPM_LEVELS) ? NUM_FCLK_DPM_LEVELS :
clock_table->NumFclkLevelsEnabled;
max_fclk = find_max_clk_value(clock_table->FclkClocks_Freq, num_fclk);
for (i = 0; i < clock_table->NumDcfClkLevelsEnabled; i++) {
num_dcfclk = (clock_table->NumDcfClkLevelsEnabled > NUM_DCFCLK_DPM_LEVELS) ? NUM_DCFCLK_DPM_LEVELS :
clock_table->NumDcfClkLevelsEnabled;
for (i = 0; i < num_dcfclk; i++) {
int j;
/* First search defaults for the clocks we don't read using closest lower or equal default dcfclk */

View File

@ -361,32 +361,32 @@ void dcn35_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zst
case DCN_ZSTATE_SUPPORT_ALLOW:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
param = (1 << 10) | (1 << 9) | (1 << 8);
smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW, param = %d\n", __func__, param);
smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW, param = 0x%x\n", __func__, param);
break;
case DCN_ZSTATE_SUPPORT_DISALLOW:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
param = 0;
smu_print("%s: SMC_MSG_AllowZstatesEntry msg_id = DISALLOW, param = %d\n", __func__, param);
smu_print("%s: SMC_MSG_AllowZstatesEntry msg_id = DISALLOW, param = 0x%x\n", __func__, param);
break;
case DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
param = (1 << 10);
smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z10_ONLY, param = %d\n", __func__, param);
smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z10_ONLY, param = 0x%x\n", __func__, param);
break;
case DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
param = (1 << 10) | (1 << 8);
smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z8_Z10_ONLY, param = %d\n", __func__, param);
smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z8_Z10_ONLY, param = 0x%x\n", __func__, param);
break;
case DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
param = (1 << 8);
smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z8_ONLY, param = %d\n", __func__, param);
smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z8_ONLY, param = 0x%x\n", __func__, param);
break;
default: //DCN_ZSTATE_SUPPORT_UNKNOWN
@ -400,7 +400,7 @@ void dcn35_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zst
clk_mgr,
msg_id,
param);
smu_print("%s: msg_id = %d, param = 0x%x, return = %d\n", __func__, msg_id, param, retv);
smu_print("%s: msg_id = %d, param = 0x%x, return = 0x%x\n", __func__, msg_id, param, retv);
}
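From the cases above, the AllowZstatesEntry parameter appears to carry one z-state per bit (an assumption read off the values, not documented here): bit 8 = Z8, bit 9 = Z9, bit 10 = Z10. So ALLOW sets all three, ALLOW_Z8_Z10_ONLY sets bits 8 and 10, and so on:

#define DEMO_ALLOW_Z8	(1u << 8)	/* assumed bit meanings */
#define DEMO_ALLOW_Z9	(1u << 9)
#define DEMO_ALLOW_Z10	(1u << 10)

static unsigned int demo_zstate_param(int z8, int z9, int z10)
{
	return (z8 ? DEMO_ALLOW_Z8 : 0) |
	       (z9 ? DEMO_ALLOW_Z9 : 0) |
	       (z10 ? DEMO_ALLOW_Z10 : 0);
}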
int dcn35_smu_get_dprefclk(struct clk_mgr_internal *clk_mgr)

View File

@ -2454,6 +2454,10 @@ static enum surface_update_type get_scaling_info_update_type(
/* Changing clip size of a large surface may result in MPC slice count change */
update_flags->bits.bandwidth_change = 1;
if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width ||
u->scaling_info->clip_rect.height != u->surface->clip_rect.height)
update_flags->bits.clip_size_change = 1;
if (u->scaling_info->src_rect.x != u->surface->src_rect.x
|| u->scaling_info->src_rect.y != u->surface->src_rect.y
|| u->scaling_info->clip_rect.x != u->surface->clip_rect.x
@ -2467,7 +2471,8 @@ static enum surface_update_type get_scaling_info_update_type(
|| update_flags->bits.scaling_change)
return UPDATE_TYPE_FULL;
if (update_flags->bits.position_change)
if (update_flags->bits.position_change ||
update_flags->bits.clip_size_change)
return UPDATE_TYPE_MED;
return UPDATE_TYPE_FAST;
@ -3093,10 +3098,6 @@ static bool update_planes_and_stream_state(struct dc *dc,
if (otg_master && otg_master->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE)
resource_build_test_pattern_params(&context->res_ctx, otg_master);
if (otg_master && (otg_master->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422 ||
otg_master->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420))
resource_build_subsampling_params(&context->res_ctx, otg_master);
}
}

View File

@ -822,16 +822,6 @@ static struct rect calculate_odm_slice_in_timing_active(struct pipe_ctx *pipe_ct
stream->timing.v_border_bottom +
stream->timing.v_border_top;
/* Recout for ODM slices after the first slice need one extra left edge pixel
* for 3-tap chroma subsampling.
*/
if (odm_slice_idx > 0 &&
(pipe_ctx->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422 ||
pipe_ctx->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)) {
odm_rec.x -= 1;
odm_rec.width += 1;
}
return odm_rec;
}
@ -1448,7 +1438,6 @@ void resource_build_test_pattern_params(struct resource_context *res_ctx,
enum controller_dp_test_pattern controller_test_pattern;
enum controller_dp_color_space controller_color_space;
enum dc_color_depth color_depth = otg_master->stream->timing.display_color_depth;
enum dc_pixel_encoding pixel_encoding = otg_master->stream->timing.pixel_encoding;
int h_active = otg_master->stream->timing.h_addressable +
otg_master->stream->timing.h_border_left +
otg_master->stream->timing.h_border_right;
@ -1480,36 +1469,10 @@ void resource_build_test_pattern_params(struct resource_context *res_ctx,
else
params->width = last_odm_slice_width;
/* Extra left edge pixel is required for 3-tap chroma subsampling. */
if (i != 0 && (pixel_encoding == PIXEL_ENCODING_YCBCR422 ||
pixel_encoding == PIXEL_ENCODING_YCBCR420)) {
params->offset -= 1;
params->width += 1;
}
offset += odm_slice_width;
}
}
void resource_build_subsampling_params(struct resource_context *res_ctx,
struct pipe_ctx *otg_master)
{
struct pipe_ctx *opp_heads[MAX_PIPES];
int odm_cnt = 1;
int i;
odm_cnt = resource_get_opp_heads_for_otg_master(otg_master, res_ctx, opp_heads);
/* For ODM slices after the first slice, extra left edge pixel is required
* for 3-tap chroma subsampling.
*/
if (otg_master->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422 ||
otg_master->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) {
for (i = 0; i < odm_cnt; i++)
opp_heads[i]->stream_res.left_edge_extra_pixel = (i == 0) ? false : true;
}
}
bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
{
const struct dc_plane_state *plane_state = pipe_ctx->plane_state;

View File

@ -51,7 +51,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
#define DC_VER "3.2.271"
#define DC_VER "3.2.272"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@ -1252,6 +1252,7 @@ union surface_update_flags {
uint32_t rotation_change:1;
uint32_t swizzle_change:1;
uint32_t scaling_change:1;
uint32_t clip_size_change: 1;
uint32_t position_change:1;
uint32_t in_transfer_func_change:1;
uint32_t input_csc_change:1;
@ -1571,7 +1572,19 @@ struct dc_link {
enum engine_id dpia_preferred_eng_id;
bool test_pattern_enabled;
/* Pending/Current test pattern are only used to perform and track
* FIXED_VS retimer test pattern/lane adjustment override state.
* Pending allows link HWSS to differentiate PHY vs non-PHY pattern,
* to perform specific lane adjust overrides before setting certain
* PHY test patterns. In cases when lane adjust and set test pattern
* calls are not performed atomically (i.e. performing link training),
* pending_test_pattern will be invalid or contain a non-PHY test pattern
* and current_test_pattern will contain required context for any future
* set pattern/set lane adjust to transition between override state(s).
*/
enum dp_test_pattern current_test_pattern;
enum dp_test_pattern pending_test_pattern;
union compliance_test_state compliance_test_state;
void *priv;

View File

@ -1198,6 +1198,7 @@ bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait)
static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
{
struct dc_dmub_srv *dc_dmub_srv;
union dmub_rb_cmd cmd = {0};
if (dc->debug.dmcub_emulation)
@ -1206,6 +1207,8 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
if (!dc->ctx->dmub_srv || !dc->ctx->dmub_srv->dmub)
return;
dc_dmub_srv = dc->ctx->dmub_srv;
memset(&cmd, 0, sizeof(cmd));
cmd.idle_opt_notify_idle.header.type = DMUB_CMD__IDLE_OPT;
cmd.idle_opt_notify_idle.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_NOTIFY_IDLE;
@ -1216,10 +1219,32 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
cmd.idle_opt_notify_idle.cntl_data.driver_idle = allow_idle;
if (allow_idle) {
volatile struct dmub_shared_state_ips_driver *ips_driver =
&dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER].data.ips_driver;
union dmub_shared_state_ips_driver_signals new_signals;
dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
if (dc->hwss.set_idle_state)
dc->hwss.set_idle_state(dc, true);
memset(&new_signals, 0, sizeof(new_signals));
if (dc->config.disable_ips == DMUB_IPS_ENABLE ||
dc->config.disable_ips == DMUB_IPS_DISABLE_DYNAMIC) {
new_signals.bits.allow_pg = 1;
new_signals.bits.allow_ips1 = 1;
new_signals.bits.allow_ips2 = 1;
new_signals.bits.allow_z10 = 1;
} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS1) {
new_signals.bits.allow_ips1 = 1;
} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2) {
new_signals.bits.allow_pg = 1;
new_signals.bits.allow_ips1 = 1;
} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2_Z10) {
new_signals.bits.allow_pg = 1;
new_signals.bits.allow_ips1 = 1;
new_signals.bits.allow_ips2 = 1;
}
ips_driver->signals = new_signals;
}
/* NOTE: This does not use the "wake" interface since this is part of the wake path. */
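Condensed view of the policy-to-signal mapping introduced above; "policy" stands in for dc->config.disable_ips, the union type comes from dmub_cmd.h, and the helper itself is illustrative:

static union dmub_shared_state_ips_driver_signals demo_idle_signals(int policy)
{
	union dmub_shared_state_ips_driver_signals s = { .all = 0 };

	if (policy == DMUB_IPS_ENABLE || policy == DMUB_IPS_DISABLE_DYNAMIC) {
		s.bits.allow_pg = s.bits.allow_ips1 = 1;
		s.bits.allow_ips2 = s.bits.allow_z10 = 1;
	} else if (policy == DMUB_IPS_DISABLE_IPS1) {
		s.bits.allow_ips1 = 1;
	} else if (policy == DMUB_IPS_DISABLE_IPS2) {
		s.bits.allow_pg = s.bits.allow_ips1 = 1;
	} else if (policy == DMUB_IPS_DISABLE_IPS2_Z10) {
		s.bits.allow_pg = s.bits.allow_ips1 = s.bits.allow_ips2 = 1;
	}
	return s;				/* all-zero for any other policy */
}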
@ -1229,8 +1254,7 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
{
uint32_t allow_state = 0;
uint32_t commit_state = 0;
struct dc_dmub_srv *dc_dmub_srv;
if (dc->debug.dmcub_emulation)
return;
@ -1238,61 +1262,44 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
if (!dc->ctx->dmub_srv || !dc->ctx->dmub_srv->dmub)
return;
if (dc->hwss.get_idle_state &&
dc->hwss.set_idle_state &&
dc->clk_mgr->funcs->exit_low_power_state) {
dc_dmub_srv = dc->ctx->dmub_srv;
allow_state = dc->hwss.get_idle_state(dc);
dc->hwss.set_idle_state(dc, false);
if (dc->clk_mgr->funcs->exit_low_power_state) {
volatile const struct dmub_shared_state_ips_fw *ips_fw =
&dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_FW].data.ips_fw;
volatile struct dmub_shared_state_ips_driver *ips_driver =
&dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER].data.ips_driver;
union dmub_shared_state_ips_driver_signals prev_driver_signals = ips_driver->signals;
if (!(allow_state & DMUB_IPS2_ALLOW_MASK)) {
// Wait for evaluation time
for (;;) {
udelay(dc->debug.ips2_eval_delay_us);
commit_state = dc->hwss.get_idle_state(dc);
if (commit_state & DMUB_IPS2_ALLOW_MASK)
break;
ips_driver->signals.all = 0;
/* allow was still set, retry eval delay */
dc->hwss.set_idle_state(dc, false);
}
if (prev_driver_signals.bits.allow_ips2) {
udelay(dc->debug.ips2_eval_delay_us);
if (!(commit_state & DMUB_IPS2_COMMIT_MASK)) {
if (ips_fw->signals.bits.ips2_commit) {
// Tell PMFW to exit low power state
dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
// Wait for IPS2 entry upper bound
udelay(dc->debug.ips2_entry_delay_us);
dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
for (;;) {
commit_state = dc->hwss.get_idle_state(dc);
if (commit_state & DMUB_IPS2_COMMIT_MASK)
break;
while (ips_fw->signals.bits.ips2_commit)
udelay(1);
}
if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true))
ASSERT(0);
/* TODO: See if we can return early here - IPS2 should go
* back directly to IPS0 and clear the flags, but it will
* be safer to directly notify DMCUB of this.
*/
allow_state = dc->hwss.get_idle_state(dc);
dmub_srv_sync_inbox1(dc->ctx->dmub_srv->dmub);
}
}
dc_dmub_srv_notify_idle(dc, false);
if (!(allow_state & DMUB_IPS1_ALLOW_MASK)) {
for (;;) {
commit_state = dc->hwss.get_idle_state(dc);
if (commit_state & DMUB_IPS1_COMMIT_MASK)
break;
if (prev_driver_signals.bits.allow_ips1) {
while (ips_fw->signals.bits.ips1_commit)
udelay(1);
}
}
}

View File

@ -65,5 +65,9 @@ bool should_use_dmub_lock(struct dc_link *link)
{
if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
return true;
if (link->replay_settings.replay_feature_enabled)
return true;
return false;
}

View File

@ -56,16 +56,13 @@ static void dpp3_enable_cm_block(
static enum dc_lut_mode dpp30_get_gamcor_current(struct dpp *dpp_base)
{
enum dc_lut_mode mode;
enum dc_lut_mode mode = LUT_BYPASS;
uint32_t state_mode;
uint32_t lut_mode;
struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_MODE_CURRENT, &state_mode);
if (state_mode == 0)
mode = LUT_BYPASS;
if (state_mode == 2) {//Programmable RAM LUT
REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT_CURRENT, &lut_mode);
if (lut_mode == 0)

View File

@ -2760,7 +2760,7 @@ static int build_synthetic_soc_states(bool disable_dc_mode_overwrite, struct clk
struct _vcs_dpi_voltage_scaling_st entry = {0};
struct clk_limit_table_entry max_clk_data = {0};
unsigned int min_dcfclk_mhz = 399, min_fclk_mhz = 599;
unsigned int min_dcfclk_mhz = 199, min_fclk_mhz = 299;
static const unsigned int num_dcfclk_stas = 5;
unsigned int dcfclk_sta_targets[DC__VOLTAGE_STATES] = {199, 615, 906, 1324, 1564};

View File

@ -588,7 +588,9 @@ void dcn35_decide_zstate_support(struct dc *dc, struct dc_state *context)
} else if (context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) {
struct dc_link *link = context->streams[0]->sink->link;
bool is_pwrseq0 = link && link->link_index == 0;
bool is_psr1 = link && link->psr_settings.psr_version == DC_PSR_VERSION_1 && !link->panel_config.psr.disable_psr;
bool is_psr = (link && (link->psr_settings.psr_version == DC_PSR_VERSION_1 ||
link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) && !link->panel_config.psr.disable_psr);
bool is_replay = link && link->replay_settings.replay_feature_enabled;
int minmum_z8_residency =
dc->debug.minimum_z8_residency_time > 0 ? dc->debug.minimum_z8_residency_time : 1000;
bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z8_residency;
@ -596,12 +598,14 @@ void dcn35_decide_zstate_support(struct dc *dc, struct dc_state *context)
dc->debug.minimum_z10_residency_time > 0 ? dc->debug.minimum_z10_residency_time : 5000;
bool allow_z10 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z10_residency;
/*for psr1/psr-su, we allow z8 and z10 based on latency, for replay with IPS enabled, it will enter ips2*/
if (is_pwrseq0 && allow_z10)
support = DCN_ZSTATE_SUPPORT_ALLOW;
else if (is_pwrseq0 && is_psr1)
else if (is_pwrseq0 && (is_psr || is_replay))
support = allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY : DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY;
else if (allow_z8)
support = DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY;
}
context->bw_ctx.bw.dcn.clk.zstate_support = support;
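The decision ladder above, condensed into a stand-alone sketch. The enum values appear in this patch; the function name and the enum type name are illustrative:

static enum dcn_zstate_support_state
demo_zstate(bool pwrseq0, bool allow_z10, bool allow_z8, bool psr_or_replay)
{
	if (pwrseq0 && allow_z10)
		return DCN_ZSTATE_SUPPORT_ALLOW;
	if (pwrseq0 && psr_or_replay)
		return allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY
				: DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY;
	if (allow_z8)
		return DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY;
	return DCN_ZSTATE_SUPPORT_DISALLOW;
}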

View File

@ -1573,8 +1573,7 @@ static void dcn20_detect_pipe_changes(struct dc_state *old_state,
* makes this assumption at the moment with how hubp reset is matched to
* same index mpcc reset.
*/
if (old_pipe->stream_res.opp != new_pipe->stream_res.opp ||
old_pipe->stream_res.left_edge_extra_pixel != new_pipe->stream_res.left_edge_extra_pixel)
if (old_pipe->stream_res.opp != new_pipe->stream_res.opp)
new_pipe->update_flags.bits.opp_changed = 1;
if (old_pipe->stream_res.tg != new_pipe->stream_res.tg)
new_pipe->update_flags.bits.tg_changed = 1;
@ -1740,6 +1739,7 @@ static void dcn20_update_dchubp_dpp(
if (pipe_ctx->update_flags.bits.scaler ||
plane_state->update_flags.bits.scaling_change ||
plane_state->update_flags.bits.position_change ||
plane_state->update_flags.bits.clip_size_change ||
plane_state->update_flags.bits.per_pixel_alpha_change ||
pipe_ctx->stream->update_flags.bits.scaling) {
pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->plane_state->per_pixel_alpha;
@ -1752,6 +1752,7 @@ static void dcn20_update_dchubp_dpp(
if (pipe_ctx->update_flags.bits.viewport ||
(context == dc->current_state && plane_state->update_flags.bits.position_change) ||
(context == dc->current_state && plane_state->update_flags.bits.scaling_change) ||
(context == dc->current_state && plane_state->update_flags.bits.clip_size_change) ||
(context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) {
hubp->funcs->mem_program_viewport(
@ -1960,10 +1961,6 @@ static void dcn20_program_pipe(
pipe_ctx->stream_res.opp,
&pipe_ctx->stream->bit_depth_params,
&pipe_ctx->stream->clamping);
pipe_ctx->stream_res.opp->funcs->opp_program_left_edge_extra_pixel(
pipe_ctx->stream_res.opp,
pipe_ctx->stream_res.left_edge_extra_pixel);
}
/* Set ABM pipe after other pipe configurations done */

View File

@ -211,7 +211,7 @@ void dcn21_set_pipe(struct pipe_ctx *pipe_ctx)
struct dmcu *dmcu = pipe_ctx->stream->ctx->dc->res_pool->dmcu;
uint32_t otg_inst;
if (!abm && !tg && !panel_cntl)
if (!abm || !tg || !panel_cntl)
return;
otg_inst = tg->inst;
@ -245,7 +245,7 @@ bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx,
struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl;
uint32_t otg_inst;
if (!abm && !tg && !panel_cntl)
if (!abm || !tg || !panel_cntl)
return false;
otg_inst = tg->inst;
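The operator change above is the whole fix: with &&, the early return fired only when all three pointers were NULL, so a single missing dependency still reached the dereferences that follow. With ||, any NULL bails out. The invariant, stated as a helper:

#include <stdbool.h>
#include <stddef.h>

static bool demo_deps_present(const void *abm, const void *tg,
			      const void *panel_cntl)
{
	/* !a || !b || !c  is the negation of  a && b && c */
	return abm != NULL && tg != NULL && panel_cntl != NULL;
}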

View File

@ -333,8 +333,6 @@ struct stream_resource {
uint8_t gsl_group;
struct test_pattern_params test_pattern_params;
bool left_edge_extra_pixel;
};
struct plane_resource {

View File

@ -107,10 +107,6 @@ void resource_build_test_pattern_params(
struct resource_context *res_ctx,
struct pipe_ctx *pipe_ctx);
void resource_build_subsampling_params(
struct resource_context *res_ctx,
struct pipe_ctx *pipe_ctx);
bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx);
enum dc_status resource_build_scaling_params_for_context(

View File

@ -61,22 +61,6 @@ static enum dc_link_rate get_link_rate_from_test_link_rate(uint8_t test_rate)
}
}
static bool is_dp_phy_sqaure_pattern(enum dp_test_pattern test_pattern)
{
return (DP_TEST_PATTERN_SQUARE_BEGIN <= test_pattern &&
test_pattern <= DP_TEST_PATTERN_SQUARE_END);
}
static bool is_dp_phy_pattern(enum dp_test_pattern test_pattern)
{
if ((DP_TEST_PATTERN_PHY_PATTERN_BEGIN <= test_pattern &&
test_pattern <= DP_TEST_PATTERN_PHY_PATTERN_END) ||
test_pattern == DP_TEST_PATTERN_VIDEO_MODE)
return true;
else
return false;
}
static void dp_retrain_link_dp_test(struct dc_link *link,
struct dc_link_settings *link_setting,
bool skip_video_pattern)
@ -361,7 +345,7 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
test_pattern_size);
}
if (is_dp_phy_sqaure_pattern(test_pattern)) {
if (IS_DP_PHY_SQUARE_PATTERN(test_pattern)) {
test_pattern_size = 1; // Square pattern data is 1 byte (DP spec)
core_link_read_dpcd(
link,
@ -623,6 +607,8 @@ bool dp_set_test_pattern(
if (pipe_ctx == NULL)
return false;
link->pending_test_pattern = test_pattern;
/* Reset CRTC Test Pattern if it is currently running and request is VideoMode */
if (link->test_pattern_enabled && test_pattern ==
DP_TEST_PATTERN_VIDEO_MODE) {
@ -643,12 +629,13 @@ bool dp_set_test_pattern(
/* Reset Test Pattern state */
link->test_pattern_enabled = false;
link->current_test_pattern = test_pattern;
link->pending_test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
return true;
}
/* Check for PHY Test Patterns */
if (is_dp_phy_pattern(test_pattern)) {
if (IS_DP_PHY_PATTERN(test_pattern)) {
/* Set DPCD Lane Settings before running test pattern */
if (p_link_settings != NULL) {
if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
@ -681,6 +668,7 @@ bool dp_set_test_pattern(
/* Set Test Pattern state */
link->test_pattern_enabled = true;
link->current_test_pattern = test_pattern;
link->pending_test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
if (p_link_settings != NULL)
dpcd_set_link_settings(link,
p_link_settings);
@ -756,7 +744,7 @@ bool dp_set_test_pattern(
return false;
if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
if (is_dp_phy_sqaure_pattern(test_pattern))
if (IS_DP_PHY_SQUARE_PATTERN(test_pattern))
core_link_write_dpcd(link,
DP_LINK_SQUARE_PATTERN,
p_custom_pattern,
@ -884,6 +872,7 @@ bool dp_set_test_pattern(
/* Set Test Pattern state */
link->test_pattern_enabled = true;
link->current_test_pattern = test_pattern;
link->pending_test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
}
return true;

View File

@ -80,21 +80,23 @@ static bool set_dio_fixed_vs_pe_retimer_dp_link_test_pattern_override(struct dc_
const uint8_t vendor_lttpr_write_data_pg0[4] = {0x1, 0x11, 0x0, 0x0};
const uint8_t vendor_lttpr_exit_manual_automation_0[4] = {0x1, 0x11, 0x0, 0x06};
if (!link->dpcd_caps.lttpr_caps.main_link_channel_coding.bits.DP_128b_132b_SUPPORTED)
return false;
if (tp_params == NULL)
return false;
if (link->current_test_pattern >= DP_TEST_PATTERN_SQUARE_BEGIN &&
link->current_test_pattern <= DP_TEST_PATTERN_SQUARE_END) {
if (IS_DP_PHY_SQUARE_PATTERN(link->current_test_pattern))
// Deprogram overrides from previous test pattern
dp_dio_fixed_vs_pe_retimer_exit_manual_automation(link);
}
switch (tp_params->dp_phy_pattern) {
case DP_TEST_PATTERN_80BIT_CUSTOM:
if (tp_params->custom_pattern_size == 0 || memcmp(tp_params->custom_pattern,
pltpat_custom, tp_params->custom_pattern_size) != 0)
return false;
hw_tp_params.custom_pattern = tp_params->custom_pattern;
hw_tp_params.custom_pattern_size = tp_params->custom_pattern_size;
break;
case DP_TEST_PATTERN_D102:
break;
@ -185,13 +187,7 @@ static const struct link_hwss dio_fixed_vs_pe_retimer_link_hwss = {
bool requires_fixed_vs_pe_retimer_dio_link_hwss(const struct dc_link *link)
{
if (!(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN))
return false;
if (!link->dpcd_caps.lttpr_caps.main_link_channel_coding.bits.DP_128b_132b_SUPPORTED)
return false;
return true;
return (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN);
}
const struct link_hwss *get_dio_fixed_vs_pe_retimer_link_hwss(void)

View File

@ -74,13 +74,16 @@ static void dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(struct dc_link *link,
static void dp_hpo_fixed_vs_pe_retimer_program_override_test_pattern(struct dc_link *link,
struct encoder_set_dp_phy_pattern_param *tp_params)
{
uint8_t clk_src = 0x4C;
uint8_t pattern = 0x4F; /* SQ128 */
const uint8_t vendor_lttpr_write_data_pg0[4] = {0x1, 0x11, 0x0, 0x0};
const uint8_t vendor_lttpr_write_data_pg1[4] = {0x1, 0x50, 0x50, 0x0};
const uint8_t vendor_lttpr_write_data_pg2[4] = {0x1, 0x51, 0x50, 0x0};
const uint8_t vendor_lttpr_write_data_pg1[4] = {0x1, 0x50, 0x50, clk_src};
const uint8_t vendor_lttpr_write_data_pg2[4] = {0x1, 0x51, 0x50, clk_src};
const uint8_t vendor_lttpr_write_data_pg3[4] = {0x1, 0x10, 0x58, 0x21};
const uint8_t vendor_lttpr_write_data_pg4[4] = {0x1, 0x10, 0x59, 0x21};
const uint8_t vendor_lttpr_write_data_pg5[4] = {0x1, 0x1C, 0x58, 0x4F};
const uint8_t vendor_lttpr_write_data_pg6[4] = {0x1, 0x1C, 0x59, 0x4F};
const uint8_t vendor_lttpr_write_data_pg5[4] = {0x1, 0x1C, 0x58, pattern};
const uint8_t vendor_lttpr_write_data_pg6[4] = {0x1, 0x1C, 0x59, pattern};
const uint8_t vendor_lttpr_write_data_pg7[4] = {0x1, 0x30, 0x51, 0x20};
const uint8_t vendor_lttpr_write_data_pg8[4] = {0x1, 0x30, 0x52, 0x20};
const uint8_t vendor_lttpr_write_data_pg9[4] = {0x1, 0x30, 0x54, 0x20};
@ -123,18 +126,20 @@ static bool dp_hpo_fixed_vs_pe_retimer_set_override_test_pattern(struct dc_link
struct encoder_set_dp_phy_pattern_param hw_tp_params = { 0 };
const uint8_t vendor_lttpr_exit_manual_automation_0[4] = {0x1, 0x11, 0x0, 0x06};
if (!link->dpcd_caps.lttpr_caps.main_link_channel_coding.bits.DP_128b_132b_SUPPORTED)
return false;
if (tp_params == NULL)
return false;
if (tp_params->dp_phy_pattern < DP_TEST_PATTERN_SQUARE_BEGIN ||
tp_params->dp_phy_pattern > DP_TEST_PATTERN_SQUARE_END) {
if (!IS_DP_PHY_SQUARE_PATTERN(tp_params->dp_phy_pattern)) {
// Deprogram overrides from previously set square wave override
if (link->current_test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM ||
link->current_test_pattern == DP_TEST_PATTERN_D102)
link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
&vendor_lttpr_exit_manual_automation_0[0],
sizeof(vendor_lttpr_exit_manual_automation_0));
else
else if (IS_DP_PHY_SQUARE_PATTERN(link->current_test_pattern))
dp_dio_fixed_vs_pe_retimer_exit_manual_automation(link);
return false;
@ -148,8 +153,6 @@ static bool dp_hpo_fixed_vs_pe_retimer_set_override_test_pattern(struct dc_link
dp_hpo_fixed_vs_pe_retimer_program_override_test_pattern(link, tp_params);
dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(link, &link->cur_lane_setting[0]);
return true;
}
@ -170,16 +173,18 @@ static void set_hpo_fixed_vs_pe_retimer_dp_lane_settings(struct dc_link *link,
const struct dc_link_settings *link_settings,
const struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX])
{
link_res->hpo_dp_link_enc->funcs->set_ffe(
link_res->hpo_dp_link_enc,
link_settings,
lane_settings[0].FFE_PRESET.raw);
// FFE is programmed when retimer is programmed for SQ128, but explicit
// programming needed here as well in case FFE-only update is requested
if (link->current_test_pattern >= DP_TEST_PATTERN_SQUARE_BEGIN &&
link->current_test_pattern <= DP_TEST_PATTERN_SQUARE_END)
dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(link, &lane_settings[0]);
// Don't update our HW FFE when outputting phy test patterns
if (IS_DP_PHY_PATTERN(link->pending_test_pattern)) {
// Directly program FIXED_VS retimer FFE for SQ128 override
if (IS_DP_PHY_SQUARE_PATTERN(link->pending_test_pattern)) {
dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(link, &lane_settings[0]);
}
} else {
link_res->hpo_dp_link_enc->funcs->set_ffe(
link_res->hpo_dp_link_enc,
link_settings,
lane_settings[0].FFE_PRESET.raw);
}
}
static void enable_hpo_fixed_vs_pe_retimer_dp_link_output(struct dc_link *link,
@ -214,13 +219,7 @@ static const struct link_hwss hpo_fixed_vs_pe_retimer_dp_link_hwss = {
bool requires_fixed_vs_pe_retimer_hpo_link_hwss(const struct dc_link *link)
{
if (!(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN))
return false;
if (!link->dpcd_caps.lttpr_caps.main_link_channel_coding.bits.DP_128b_132b_SUPPORTED)
return false;
return true;
return requires_fixed_vs_pe_retimer_dio_link_hwss(link);
}
const struct link_hwss *get_hpo_fixed_vs_pe_retimer_dp_link_hwss(void)

View File

@ -359,7 +359,7 @@ bool link_validate_dpia_bandwidth(const struct dc_stream_state *stream, const un
struct dc_link *dpia_link[MAX_DPIA_NUM] = {0};
int num_dpias = 0;
for (uint8_t i = 0; i < num_streams; ++i) {
for (unsigned int i = 0; i < num_streams; ++i) {
if (stream[i].signal == SIGNAL_TYPE_DISPLAY_PORT) {
/* new dpia sst stream, check whether it exceeds max dpia */
if (num_dpias >= MAX_DPIA_NUM)

View File

@ -37,6 +37,7 @@
#include "clk_mgr.h"
#include "resource.h"
#include "link_enc_cfg.h"
#include "atomfirmware.h"
#define DC_LOGGER \
link->ctx->logger
@ -100,8 +101,11 @@ void dp_set_hw_lane_settings(
{
const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
// Don't return here if using FIXED_VS link HWSS and encoding is 128b/132b
if ((link_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) &&
!is_immediate_downstream(link, offset))
!is_immediate_downstream(link, offset) &&
(!(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) ||
link_dp_get_encoding_format(&link_settings->link_settings) == DP_8b_10b_ENCODING))
return;
if (link_hwss->ext.set_dp_lane_settings)

View File

@ -517,6 +517,7 @@ enum link_training_result dp_check_link_loss_status(
{
enum link_training_result status = LINK_TRAINING_SUCCESS;
union lane_status lane_status;
union lane_align_status_updated dpcd_lane_status_updated;
uint8_t dpcd_buf[6] = {0};
uint32_t lane;
@ -532,10 +533,12 @@ enum link_training_result dp_check_link_loss_status(
* check lanes status
*/
lane_status.raw = dp_get_nibble_at_index(&dpcd_buf[2], lane);
+ dpcd_lane_status_updated.raw = dpcd_buf[4];
if (!lane_status.bits.CHANNEL_EQ_DONE_0 ||
!lane_status.bits.CR_DONE_0 ||
- !lane_status.bits.SYMBOL_LOCKED_0) {
+ !lane_status.bits.SYMBOL_LOCKED_0 ||
+ !dp_is_interlane_aligned(dpcd_lane_status_updated)) {
/* if one of the channel equalization, clock
* recovery or symbol lock is dropped
* consider it as (link has been


@ -619,7 +619,7 @@ static enum link_training_result dpia_training_eq_non_transparent(
uint32_t retries_eq = 0;
enum dc_status status;
enum dc_dp_training_pattern tr_pattern;
- uint32_t wait_time_microsec;
+ uint32_t wait_time_microsec = 0;
enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
union lane_align_status_updated dpcd_lane_status_updated = {0};
union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};


@ -892,7 +892,8 @@ bool edp_set_replay_allow_active(struct dc_link *link, const bool *allow_active,
/* Set power optimization flag */
if (power_opts && link->replay_settings.replay_power_opt_active != *power_opts) {
- if (link->replay_settings.replay_feature_enabled && replay->funcs->replay_set_power_opt) {
+ if (replay != NULL && link->replay_settings.replay_feature_enabled &&
+ replay->funcs->replay_set_power_opt) {
replay->funcs->replay_set_power_opt(replay, *power_opts, panel_inst);
link->replay_settings.replay_power_opt_active = *power_opts;
}


@ -781,7 +781,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_z10 = false,
.ignore_pg = true,
.psp_disabled_wa = true,
- .ips2_eval_delay_us = 1650,
+ .ips2_eval_delay_us = 2000,
.ips2_entry_delay_us = 800,
.disable_dmub_reallow_idle = true,
.static_screen_wait_frames = 2,
@ -1907,7 +1907,8 @@ static bool dcn35_resource_construct(
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
/*HW default is to have all the FGCG enabled, SW no need to program them*/
dc->debug.enable_fine_grain_clock_gating.u32All = 0xFFFF;
// Init the vm_helper
if (dc->vm_helper)
vm_helper_init(dc->vm_helper, 16);


@ -125,6 +125,7 @@ enum dmub_window_id {
DMUB_WINDOW_5_TRACEBUFF,
DMUB_WINDOW_6_FW_STATE,
DMUB_WINDOW_7_SCRATCH_MEM,
DMUB_WINDOW_SHARED_STATE,
DMUB_WINDOW_TOTAL,
};
@ -368,7 +369,8 @@ struct dmub_srv_hw_funcs {
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
- const struct dmub_window *cw6);
+ const struct dmub_window *cw6,
+ const struct dmub_window *region6);
void (*setup_mailbox)(struct dmub_srv *dmub,
const struct dmub_region *inbox1);
@ -461,6 +463,7 @@ struct dmub_srv_create_params {
* @user_ctx: user provided context for the dmub_srv
* @fw_version: the current firmware version, if any
* @is_virtual: false if hardware support only
* @shared_state: dmub shared state between firmware and driver
* @fw_state: dmub firmware state pointer
*/
struct dmub_srv {
@ -469,6 +472,7 @@ struct dmub_srv {
uint32_t fw_version;
bool is_virtual;
struct dmub_fb scratch_mem_fb;
volatile struct dmub_shared_state_feature_block *shared_state;
volatile const struct dmub_fw_state *fw_state;
/* private: internal use only */


@ -508,6 +508,8 @@ struct dmub_visual_confirm_color {
* @trace_buffer_size: size of the tracebuffer region
* @fw_version: the firmware version information
* @dal_fw: 1 if the firmware is DAL
* @shared_state_size: size of the shared state region in bytes
* @shared_state_features: number of shared state features
*/
struct dmub_fw_meta_info {
uint32_t magic_value; /**< magic value identifying DMUB firmware meta info */
@ -516,6 +518,9 @@ struct dmub_fw_meta_info {
uint32_t fw_version; /**< the firmware version information */
uint8_t dal_fw; /**< 1 if the firmware is DAL */
uint8_t reserved[3]; /**< padding bits */
uint32_t shared_state_size; /**< size of the shared state region in bytes */
uint16_t shared_state_features; /**< number of shared state features */
uint16_t reserved2; /**< padding bytes */
};
/**
@ -659,6 +664,116 @@ enum dmub_fw_boot_options_bit {
DMUB_FW_BOOT_OPTION_BIT_OPTIMIZED_INIT_DONE = (1 << 2), /**< 1 if optimized init done */
};
//==============================================================================
//< DMUB_SHARED_STATE>==========================================================
//==============================================================================
/**
* Shared firmware state between driver and firmware for lockless communication
* in situations where the inbox/outbox may be unavailable.
*
* Each structure *must* be at most 256-bytes in size. The layout allocation is
* described below:
*
* [Header (256 Bytes)][Feature 1 (256 Bytes)][Feature 2 (256 Bytes)]...
*/
/**
* enum dmub_shared_state_feature_id - List of shared state features.
*/
enum dmub_shared_state_feature_id {
DMUB_SHARED_SHARE_FEATURE__INVALID = 0,
DMUB_SHARED_SHARE_FEATURE__IPS_FW = 1,
DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER = 2,
DMUB_SHARED_STATE_FEATURE__LAST, /* Total number of features. */
};
/**
* union dmub_shared_state_ips_fw_signals - Firmware signals for IPS.
*/
union dmub_shared_state_ips_fw_signals {
struct {
uint32_t ips1_commit : 1; /**< 1 if in IPS1 */
uint32_t ips2_commit : 1; /**< 1 if in IPS2 */
uint32_t reserved_bits : 30; /**< Reserved */
} bits;
uint32_t all;
};
/**
* union dmub_shared_state_ips_driver_signals - Driver signals for IPS.
*/
union dmub_shared_state_ips_driver_signals {
struct {
uint32_t allow_pg : 1; /**< 1 if PG is allowed */
uint32_t allow_ips1 : 1; /**< 1 if IPS1 is allowed */
uint32_t allow_ips2 : 1; /**< 1 if IPS2 is allowed */
uint32_t allow_z10 : 1; /**< 1 if Z10 is allowed */
uint32_t reserved_bits : 28; /**< Reserved bits */
} bits;
uint32_t all;
};
/**
* IPS FW Version
*/
#define DMUB_SHARED_STATE__IPS_FW_VERSION 1
/**
* struct dmub_shared_state_ips_fw - Firmware state for IPS.
*/
struct dmub_shared_state_ips_fw {
union dmub_shared_state_ips_fw_signals signals; /**< 4 bytes, IPS signal bits */
uint32_t reserved[61]; /**< Reserved, to be updated when adding new fields. */
}; /* 248-bytes, fixed */
/**
* IPS Driver Version
*/
#define DMUB_SHARED_STATE__IPS_DRIVER_VERSION 1
/**
* struct dmub_shared_state_ips_driver - Driver state for IPS.
*/
struct dmub_shared_state_ips_driver {
union dmub_shared_state_ips_driver_signals signals; /**< 4 bytes, IPS signal bits */
uint32_t reserved[61]; /**< Reserved, to be updated when adding new fields. */
}; /* 248-bytes, fixed */
/**
* struct dmub_shared_state_feature_common - Generic payload.
*/
struct dmub_shared_state_feature_common {
uint32_t padding[62];
}; /* 248-bytes, fixed */
/**
* struct dmub_shared_state_feature_header - Feature description.
*/
struct dmub_shared_state_feature_header {
uint16_t id; /**< Feature ID */
uint16_t version; /**< Feature version */
uint32_t reserved; /**< Reserved bytes. */
}; /* 8 bytes, fixed */
/**
* struct dmub_shared_state_feature_block - Feature block.
*/
struct dmub_shared_state_feature_block {
struct dmub_shared_state_feature_header header; /**< Shared state header. */
union dmub_shared_feature_state_union {
struct dmub_shared_state_feature_common common; /**< Generic data */
struct dmub_shared_state_ips_fw ips_fw; /**< IPS firmware state */
struct dmub_shared_state_ips_driver ips_driver; /**< IPS driver state */
} data; /**< Shared state data. */
}; /* 256-bytes, fixed */
/**
* Shared state size in bytes.
*/
#define DMUB_FW_HEADER_SHARED_STATE_SIZE \
((DMUB_SHARED_STATE_FEATURE__LAST + 1) * sizeof(struct dmub_shared_state_feature_block))
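/*
 * Editor's note -- illustrative sketch, not part of this patch. With
 * DMUB_SHARED_STATE_FEATURE__LAST == 3, the macro above works out to
 * (3 + 1) * 256 = 1024 bytes: the 256-byte header occupies slot 0 (the
 * INVALID index) and each feature gets one fixed 256-byte block, so a
 * feature can be reached by plain array indexing. The helper name below
 * is hypothetical.
 */
static inline volatile struct dmub_shared_state_feature_block *
dmub_shared_state_feature(volatile struct dmub_shared_state_feature_block *base,
		enum dmub_shared_state_feature_id id)
{
	/* base points at the start of the shared state region (slot 0) */
	return &base[id];
}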
//==============================================================================
//</DMUB_SHARED_STATE>==========================================================
//==============================================================================


@ -191,7 +191,8 @@ void dmub_dcn20_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
- const struct dmub_window *cw6)
+ const struct dmub_window *cw6,
+ const struct dmub_window *region6)
{
union dmub_addr offset;
uint64_t fb_base, fb_offset;


@ -197,7 +197,8 @@ void dmub_dcn20_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
- const struct dmub_window *cw6);
+ const struct dmub_window *cw6,
+ const struct dmub_window *region6);
void dmub_dcn20_setup_mailbox(struct dmub_srv *dmub,
const struct dmub_region *inbox1);


@ -124,7 +124,8 @@ void dmub_dcn30_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
- const struct dmub_window *cw6)
+ const struct dmub_window *cw6,
+ const struct dmub_window *region6)
{
union dmub_addr offset;


@ -43,7 +43,8 @@ void dmub_dcn30_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
- const struct dmub_window *cw6);
+ const struct dmub_window *cw6,
+ const struct dmub_window *region6);
#endif /* _DMUB_DCN30_H_ */


@ -187,7 +187,8 @@ void dmub_dcn31_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
- const struct dmub_window *cw6)
+ const struct dmub_window *cw6,
+ const struct dmub_window *region6)
{
union dmub_addr offset;


@ -199,7 +199,8 @@ void dmub_dcn31_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
- const struct dmub_window *cw6);
+ const struct dmub_window *cw6,
+ const struct dmub_window *region6);
void dmub_dcn31_setup_mailbox(struct dmub_srv *dmub,
const struct dmub_region *inbox1);


@ -216,7 +216,8 @@ void dmub_dcn32_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
- const struct dmub_window *cw6)
+ const struct dmub_window *cw6,
+ const struct dmub_window *region6)
{
union dmub_addr offset;


@ -206,7 +206,8 @@ void dmub_dcn32_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
- const struct dmub_window *cw6);
+ const struct dmub_window *cw6,
+ const struct dmub_window *region6);
void dmub_dcn32_setup_mailbox(struct dmub_srv *dmub,
const struct dmub_region *inbox1);


@ -229,7 +229,8 @@ void dmub_dcn35_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
- const struct dmub_window *cw6)
+ const struct dmub_window *cw6,
+ const struct dmub_window *region6)
{
union dmub_addr offset;
@ -275,6 +276,15 @@ void dmub_dcn35_setup_windows(struct dmub_srv *dmub,
REG_SET_2(DMCUB_REGION3_CW6_TOP_ADDRESS, 0,
DMCUB_REGION3_CW6_TOP_ADDRESS, cw6->region.top,
DMCUB_REGION3_CW6_ENABLE, 1);
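/*
 * Editor's note: unlike the CW windows above, whose TOP_ADDRESS registers
 * take an absolute top address, REGION6 below is programmed with a
 * length-style value (top - base - 1) alongside its GPU-address offset.
 */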
offset = region6->offset;
REG_WRITE(DMCUB_REGION6_OFFSET, offset.u.low_part);
REG_WRITE(DMCUB_REGION6_OFFSET_HIGH, offset.u.high_part);
REG_SET_2(DMCUB_REGION6_TOP_ADDRESS, 0,
DMCUB_REGION6_TOP_ADDRESS,
region6->region.top - region6->region.base - 1,
DMCUB_REGION6_ENABLE, 1);
}
void dmub_dcn35_setup_mailbox(struct dmub_srv *dmub,


@ -89,6 +89,9 @@ struct dmub_srv;
DMUB_SR(DMCUB_REGION5_OFFSET) \
DMUB_SR(DMCUB_REGION5_OFFSET_HIGH) \
DMUB_SR(DMCUB_REGION5_TOP_ADDRESS) \
DMUB_SR(DMCUB_REGION6_OFFSET) \
DMUB_SR(DMCUB_REGION6_OFFSET_HIGH) \
DMUB_SR(DMCUB_REGION6_TOP_ADDRESS) \
DMUB_SR(DMCUB_SCRATCH0) \
DMUB_SR(DMCUB_SCRATCH1) \
DMUB_SR(DMCUB_SCRATCH2) \
@ -154,6 +157,8 @@ struct dmub_srv;
DMUB_SF(DMCUB_REGION4_TOP_ADDRESS, DMCUB_REGION4_ENABLE) \
DMUB_SF(DMCUB_REGION5_TOP_ADDRESS, DMCUB_REGION5_TOP_ADDRESS) \
DMUB_SF(DMCUB_REGION5_TOP_ADDRESS, DMCUB_REGION5_ENABLE) \
DMUB_SF(DMCUB_REGION6_TOP_ADDRESS, DMCUB_REGION6_TOP_ADDRESS) \
DMUB_SF(DMCUB_REGION6_TOP_ADDRESS, DMCUB_REGION6_ENABLE) \
DMUB_SF(CC_DC_PIPE_DIS, DC_DMCUB_ENABLE) \
DMUB_SF(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET) \
DMUB_SF(DCN_VM_FB_LOCATION_BASE, FB_BASE) \
@ -214,7 +219,8 @@ void dmub_dcn35_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
- const struct dmub_window *cw6);
+ const struct dmub_window *cw6,
+ const struct dmub_window *region6);
void dmub_dcn35_setup_mailbox(struct dmub_srv *dmub,
const struct dmub_region *inbox1);


@ -78,6 +78,7 @@
#define DMUB_CW6_BASE (0x66000000)
#define DMUB_REGION5_BASE (0xA0000000)
#define DMUB_REGION6_BASE (0xC0000000)
static struct dmub_srv_dcn32_regs dmub_srv_dcn32_regs;
static struct dmub_srv_dcn35_regs dmub_srv_dcn35_regs;
@ -480,6 +481,7 @@ enum dmub_status
window_sizes[DMUB_WINDOW_5_TRACEBUFF] = trace_buffer_size;
window_sizes[DMUB_WINDOW_6_FW_STATE] = fw_state_size;
window_sizes[DMUB_WINDOW_7_SCRATCH_MEM] = DMUB_SCRATCH_MEM_SIZE;
window_sizes[DMUB_WINDOW_SHARED_STATE] = DMUB_FW_HEADER_SHARED_STATE_SIZE;
out->fb_size =
dmub_srv_calc_regions_for_memory_type(params, out, window_sizes, DMUB_WINDOW_MEMORY_TYPE_FB);
@ -565,9 +567,10 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
struct dmub_fb *tracebuff_fb = params->fb[DMUB_WINDOW_5_TRACEBUFF];
struct dmub_fb *fw_state_fb = params->fb[DMUB_WINDOW_6_FW_STATE];
struct dmub_fb *scratch_mem_fb = params->fb[DMUB_WINDOW_7_SCRATCH_MEM];
+ struct dmub_fb *shared_state_fb = params->fb[DMUB_WINDOW_SHARED_STATE];
struct dmub_rb_init_params rb_params, outbox0_rb_params;
- struct dmub_window cw0, cw1, cw2, cw3, cw4, cw5, cw6;
+ struct dmub_window cw0, cw1, cw2, cw3, cw4, cw5, cw6, region6;
struct dmub_region inbox1, outbox1, outbox0;
if (!dmub->sw_init)
@ -652,10 +655,16 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
dmub->fw_state = fw_state_fb->cpu_addr;
+ region6.offset.quad_part = shared_state_fb->gpu_addr;
+ region6.region.base = DMUB_CW6_BASE;
+ region6.region.top = region6.region.base + shared_state_fb->size;
+ dmub->shared_state = shared_state_fb->cpu_addr;
dmub->scratch_mem_fb = *scratch_mem_fb;
if (dmub->hw_funcs.setup_windows)
- dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4, &cw5, &cw6);
+ dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4, &cw5, &cw6, &region6);
if (dmub->hw_funcs.setup_outbox0)
dmub->hw_funcs.setup_outbox0(dmub, &outbox0);


@ -169,6 +169,15 @@ enum dp_test_pattern {
DP_TEST_PATTERN_UNSUPPORTED
};
#define IS_DP_PHY_SQUARE_PATTERN(test_pattern)\
(DP_TEST_PATTERN_SQUARE_BEGIN <= test_pattern &&\
test_pattern <= DP_TEST_PATTERN_SQUARE_END)
#define IS_DP_PHY_PATTERN(test_pattern)\
((DP_TEST_PATTERN_PHY_PATTERN_BEGIN <= test_pattern &&\
test_pattern <= DP_TEST_PATTERN_PHY_PATTERN_END) ||\
test_pattern == DP_TEST_PATTERN_VIDEO_MODE)
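/*
 * Editor's note -- hedged usage sketch: hunks earlier in this change key
 * retimer and FFE programming off these classifications, e.g.:
 *
 *	if (IS_DP_PHY_SQUARE_PATTERN(link->pending_test_pattern))
 *		dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(link, &lane_settings[0]);
 */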
enum dp_test_pattern_color_space {
DP_TEST_PATTERN_COLOR_SPACE_RGB,
DP_TEST_PATTERN_COLOR_SPACE_YCBCR601,


@ -174,6 +174,7 @@ enum amd_powergating_state {
#define AMD_PG_SUPPORT_ATHUB (1 << 16)
#define AMD_PG_SUPPORT_JPEG (1 << 17)
#define AMD_PG_SUPPORT_IH_SRAM_PG (1 << 18)
#define AMD_PG_SUPPORT_JPEG_DPG (1 << 19)
/**
* enum PP_FEATURE_MASK - Used to mask power play features.


@ -0,0 +1,287 @@
/*
* Copyright 2023 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef _athub_4_1_0_OFFSET_HEADER
#define _athub_4_1_0_OFFSET_HEADER
// addressBlock: athub_xpbdec
// base address: 0x3000
#define regXPB_RTR_SRC_APRTR0 0x0000
#define regXPB_RTR_SRC_APRTR0_BASE_IDX 0
#define regXPB_RTR_SRC_APRTR1 0x0001
#define regXPB_RTR_SRC_APRTR1_BASE_IDX 0
#define regXPB_RTR_SRC_APRTR2 0x0002
#define regXPB_RTR_SRC_APRTR2_BASE_IDX 0
#define regXPB_RTR_SRC_APRTR3 0x0003
#define regXPB_RTR_SRC_APRTR3_BASE_IDX 0
#define regXPB_RTR_SRC_APRTR4 0x0004
#define regXPB_RTR_SRC_APRTR4_BASE_IDX 0
#define regXPB_RTR_SRC_APRTR5 0x0005
#define regXPB_RTR_SRC_APRTR5_BASE_IDX 0
#define regXPB_RTR_SRC_APRTR6 0x0006
#define regXPB_RTR_SRC_APRTR6_BASE_IDX 0
#define regXPB_RTR_SRC_APRTR7 0x0007
#define regXPB_RTR_SRC_APRTR7_BASE_IDX 0
#define regXPB_RTR_SRC_APRTR8 0x0008
#define regXPB_RTR_SRC_APRTR8_BASE_IDX 0
#define regXPB_RTR_SRC_APRTR9 0x0009
#define regXPB_RTR_SRC_APRTR9_BASE_IDX 0
#define regXPB_RTR_SRC_APRTR10 0x000a
#define regXPB_RTR_SRC_APRTR10_BASE_IDX 0
#define regXPB_RTR_SRC_APRTR11 0x000b
#define regXPB_RTR_SRC_APRTR11_BASE_IDX 0
#define regXPB_RTR_SRC_APRTR12 0x000c
#define regXPB_RTR_SRC_APRTR12_BASE_IDX 0
#define regXPB_RTR_SRC_APRTR13 0x000d
#define regXPB_RTR_SRC_APRTR13_BASE_IDX 0
#define regXPB_RTR_DEST_MAP0 0x000e
#define regXPB_RTR_DEST_MAP0_BASE_IDX 0
#define regXPB_RTR_DEST_MAP1 0x000f
#define regXPB_RTR_DEST_MAP1_BASE_IDX 0
#define regXPB_RTR_DEST_MAP2 0x0010
#define regXPB_RTR_DEST_MAP2_BASE_IDX 0
#define regXPB_RTR_DEST_MAP3 0x0011
#define regXPB_RTR_DEST_MAP3_BASE_IDX 0
#define regXPB_RTR_DEST_MAP4 0x0012
#define regXPB_RTR_DEST_MAP4_BASE_IDX 0
#define regXPB_RTR_DEST_MAP5 0x0013
#define regXPB_RTR_DEST_MAP5_BASE_IDX 0
#define regXPB_RTR_DEST_MAP6 0x0014
#define regXPB_RTR_DEST_MAP6_BASE_IDX 0
#define regXPB_RTR_DEST_MAP7 0x0015
#define regXPB_RTR_DEST_MAP7_BASE_IDX 0
#define regXPB_RTR_DEST_MAP8 0x0016
#define regXPB_RTR_DEST_MAP8_BASE_IDX 0
#define regXPB_RTR_DEST_MAP9 0x0017
#define regXPB_RTR_DEST_MAP9_BASE_IDX 0
#define regXPB_RTR_DEST_MAP10 0x0018
#define regXPB_RTR_DEST_MAP10_BASE_IDX 0
#define regXPB_RTR_DEST_MAP11 0x0019
#define regXPB_RTR_DEST_MAP11_BASE_IDX 0
#define regXPB_RTR_DEST_MAP12 0x001a
#define regXPB_RTR_DEST_MAP12_BASE_IDX 0
#define regXPB_RTR_DEST_MAP13 0x001b
#define regXPB_RTR_DEST_MAP13_BASE_IDX 0
#define regXPB_CLG_CFG0 0x001c
#define regXPB_CLG_CFG0_BASE_IDX 0
#define regXPB_CLG_CFG1 0x001d
#define regXPB_CLG_CFG1_BASE_IDX 0
#define regXPB_CLG_CFG2 0x001e
#define regXPB_CLG_CFG2_BASE_IDX 0
#define regXPB_CLG_CFG3 0x001f
#define regXPB_CLG_CFG3_BASE_IDX 0
#define regXPB_CLG_CFG4 0x0020
#define regXPB_CLG_CFG4_BASE_IDX 0
#define regXPB_CLG_CFG5 0x0021
#define regXPB_CLG_CFG5_BASE_IDX 0
#define regXPB_CLG_CFG6 0x0022
#define regXPB_CLG_CFG6_BASE_IDX 0
#define regXPB_CLG_CFG7 0x0023
#define regXPB_CLG_CFG7_BASE_IDX 0
#define regXPB_CLG_EXTRA0 0x0024
#define regXPB_CLG_EXTRA0_BASE_IDX 0
#define regXPB_CLG_EXTRA1 0x0025
#define regXPB_CLG_EXTRA1_BASE_IDX 0
#define regXPB_CLG_EXTRA_MSK 0x0026
#define regXPB_CLG_EXTRA_MSK_BASE_IDX 0
#define regXPB_LB_ADDR 0x0027
#define regXPB_LB_ADDR_BASE_IDX 0
#define regXPB_HST_CFG 0x0028
#define regXPB_HST_CFG_BASE_IDX 0
#define regXPB_P2P_BAR_CFG 0x0029
#define regXPB_P2P_BAR_CFG_BASE_IDX 0
#define regXPB_P2P_BAR0 0x002a
#define regXPB_P2P_BAR0_BASE_IDX 0
#define regXPB_P2P_BAR1 0x002b
#define regXPB_P2P_BAR1_BASE_IDX 0
#define regXPB_P2P_BAR2 0x002c
#define regXPB_P2P_BAR2_BASE_IDX 0
#define regXPB_P2P_BAR3 0x002d
#define regXPB_P2P_BAR3_BASE_IDX 0
#define regXPB_P2P_BAR4 0x002e
#define regXPB_P2P_BAR4_BASE_IDX 0
#define regXPB_P2P_BAR5 0x002f
#define regXPB_P2P_BAR5_BASE_IDX 0
#define regXPB_P2P_BAR6 0x0030
#define regXPB_P2P_BAR6_BASE_IDX 0
#define regXPB_P2P_BAR7 0x0031
#define regXPB_P2P_BAR7_BASE_IDX 0
#define regXPB_P2P_BAR_SETUP 0x0032
#define regXPB_P2P_BAR_SETUP_BASE_IDX 0
#define regXPB_P2P_BAR_DELTA_ABOVE 0x0034
#define regXPB_P2P_BAR_DELTA_ABOVE_BASE_IDX 0
#define regXPB_P2P_BAR_DELTA_BELOW 0x0035
#define regXPB_P2P_BAR_DELTA_BELOW_BASE_IDX 0
#define regXPB_PEER_SYS_BAR0 0x0036
#define regXPB_PEER_SYS_BAR0_BASE_IDX 0
#define regXPB_PEER_SYS_BAR1 0x0037
#define regXPB_PEER_SYS_BAR1_BASE_IDX 0
#define regXPB_PEER_SYS_BAR2 0x0038
#define regXPB_PEER_SYS_BAR2_BASE_IDX 0
#define regXPB_PEER_SYS_BAR3 0x0039
#define regXPB_PEER_SYS_BAR3_BASE_IDX 0
#define regXPB_PEER_SYS_BAR4 0x003a
#define regXPB_PEER_SYS_BAR4_BASE_IDX 0
#define regXPB_PEER_SYS_BAR5 0x003b
#define regXPB_PEER_SYS_BAR5_BASE_IDX 0
#define regXPB_PEER_SYS_BAR6 0x003c
#define regXPB_PEER_SYS_BAR6_BASE_IDX 0
#define regXPB_PEER_SYS_BAR7 0x003d
#define regXPB_PEER_SYS_BAR7_BASE_IDX 0
#define regXPB_PEER_SYS_BAR8 0x003e
#define regXPB_PEER_SYS_BAR8_BASE_IDX 0
#define regXPB_PEER_SYS_BAR9 0x003f
#define regXPB_PEER_SYS_BAR9_BASE_IDX 0
#define regXPB_PEER_SYS_BAR10 0x0040
#define regXPB_PEER_SYS_BAR10_BASE_IDX 0
#define regXPB_PEER_SYS_BAR11 0x0041
#define regXPB_PEER_SYS_BAR11_BASE_IDX 0
#define regXPB_PEER_SYS_BAR12 0x0042
#define regXPB_PEER_SYS_BAR12_BASE_IDX 0
#define regXPB_PEER_SYS_BAR13 0x0043
#define regXPB_PEER_SYS_BAR13_BASE_IDX 0
#define regXPB_CLK_GAT 0x0044
#define regXPB_CLK_GAT_BASE_IDX 0
#define regXPB_INTF_CFG 0x0045
#define regXPB_INTF_CFG_BASE_IDX 0
#define regXPB_INTF_STS 0x0046
#define regXPB_INTF_STS_BASE_IDX 0
#define regXPB_PIPE_STS 0x0047
#define regXPB_PIPE_STS_BASE_IDX 0
#define regXPB_WCB_STS 0x0048
#define regXPB_WCB_STS_BASE_IDX 0
#define regXPB_MAP_INVERT_FLUSH_NUM_LSB 0x0049
#define regXPB_MAP_INVERT_FLUSH_NUM_LSB_BASE_IDX 0
#define regXPB_STICKY 0x004a
#define regXPB_STICKY_BASE_IDX 0
#define regXPB_STICKY_W1C 0x004b
#define regXPB_STICKY_W1C_BASE_IDX 0
#define regXPB_SUB_CTRL 0x004c
#define regXPB_SUB_CTRL_BASE_IDX 0
#define regXPB_PERF_KNOBS 0x004d
#define regXPB_PERF_KNOBS_BASE_IDX 0
#define regXPB_MISC_CFG 0x004e
#define regXPB_MISC_CFG_BASE_IDX 0
#define regXPB_INTF_CFG2 0x004f
#define regXPB_INTF_CFG2_BASE_IDX 0
#define regXPB_CLG_EXTRA_RD 0x0050
#define regXPB_CLG_EXTRA_RD_BASE_IDX 0
#define regXPB_CLG_EXTRA_MSK_RD 0x0051
#define regXPB_CLG_EXTRA_MSK_RD_BASE_IDX 0
#define regXPB_CLG_GFX_MATCH 0x0052
#define regXPB_CLG_GFX_MATCH_BASE_IDX 0
#define regXPB_CLG_GFX_MATCH_VLD 0x0053
#define regXPB_CLG_GFX_MATCH_VLD_BASE_IDX 0
#define regXPB_CLG_GFX_MATCH_MSK 0x0054
#define regXPB_CLG_GFX_MATCH_MSK_BASE_IDX 0
#define regXPB_CLG_MM_MATCH 0x0055
#define regXPB_CLG_MM_MATCH_BASE_IDX 0
#define regXPB_CLG_MM_MATCH_VLD 0x0056
#define regXPB_CLG_MM_MATCH_VLD_BASE_IDX 0
#define regXPB_CLG_MM_MATCH_MSK 0x0057
#define regXPB_CLG_MM_MATCH_MSK_BASE_IDX 0
#define regXPB_CLG_GFX_UNITID_MAPPING0 0x005a
#define regXPB_CLG_GFX_UNITID_MAPPING0_BASE_IDX 0
#define regXPB_CLG_GFX_UNITID_MAPPING1 0x005b
#define regXPB_CLG_GFX_UNITID_MAPPING1_BASE_IDX 0
#define regXPB_CLG_GFX_UNITID_MAPPING2 0x005c
#define regXPB_CLG_GFX_UNITID_MAPPING2_BASE_IDX 0
#define regXPB_CLG_GFX_UNITID_MAPPING3 0x005d
#define regXPB_CLG_GFX_UNITID_MAPPING3_BASE_IDX 0
#define regXPB_CLG_GFX_UNITID_MAPPING4 0x005e
#define regXPB_CLG_GFX_UNITID_MAPPING4_BASE_IDX 0
#define regXPB_CLG_GFX_UNITID_MAPPING5 0x005f
#define regXPB_CLG_GFX_UNITID_MAPPING5_BASE_IDX 0
#define regXPB_CLG_GFX_UNITID_MAPPING6 0x0060
#define regXPB_CLG_GFX_UNITID_MAPPING6_BASE_IDX 0
#define regXPB_CLG_GFX_UNITID_MAPPING7 0x0061
#define regXPB_CLG_GFX_UNITID_MAPPING7_BASE_IDX 0
#define regXPB_CLG_MM_UNITID_MAPPING0 0x0062
#define regXPB_CLG_MM_UNITID_MAPPING0_BASE_IDX 0
#define regXPB_CLG_MM_UNITID_MAPPING1 0x0063
#define regXPB_CLG_MM_UNITID_MAPPING1_BASE_IDX 0
#define regXPB_CLG_MM_UNITID_MAPPING2 0x0064
#define regXPB_CLG_MM_UNITID_MAPPING2_BASE_IDX 0
#define regXPB_CLG_MM_UNITID_MAPPING3 0x0065
#define regXPB_CLG_MM_UNITID_MAPPING3_BASE_IDX 0
// addressBlock: athub_rpbdec
// base address: 0x31d0
#define regATHUB_SHARED_VIRT_RESET_REQ 0x0074
#define regATHUB_SHARED_VIRT_RESET_REQ_BASE_IDX 0
#define regATHUB_MEM_POWER_LS 0x007f
#define regATHUB_MEM_POWER_LS_BASE_IDX 0
#define regATHUB_MISC_CNTL 0x0080
#define regATHUB_MISC_CNTL_BASE_IDX 0
#define regRPB_PASSPW_CONF 0x0081
#define regRPB_PASSPW_CONF_BASE_IDX 0
#define regRPB_BLOCKLEVEL_CONF 0x0082
#define regRPB_BLOCKLEVEL_CONF_BASE_IDX 0
#define regRPB_TAG_CONF 0x0083
#define regRPB_TAG_CONF_BASE_IDX 0
#define regRPB_ARB_CNTL 0x0085
#define regRPB_ARB_CNTL_BASE_IDX 0
#define regRPB_ARB_CNTL2 0x0086
#define regRPB_ARB_CNTL2_BASE_IDX 0
#define regRPB_BIF_CNTL 0x0087
#define regRPB_BIF_CNTL_BASE_IDX 0
#define regRPB_BIF_CNTL2 0x0088
#define regRPB_BIF_CNTL2_BASE_IDX 0
#define regRPB_SDPPORT_CNTL 0x0089
#define regRPB_SDPPORT_CNTL_BASE_IDX 0
#define regRPB_NBIF_SDPPORT_CNTL 0x008a
#define regRPB_NBIF_SDPPORT_CNTL_BASE_IDX 0
#define regRPB_DEINTRLV_COMBINE_CNTL 0x008c
#define regRPB_DEINTRLV_COMBINE_CNTL_BASE_IDX 0
#define regRPB_VC_SWITCH_RDWR 0x008d
#define regRPB_VC_SWITCH_RDWR_BASE_IDX 0
#define regRPB_ATS_CNTL3 0x008e
#define regRPB_ATS_CNTL3_BASE_IDX 0
#define regRPB_DF_SDPPORT_CNTL 0x008f
#define regRPB_DF_SDPPORT_CNTL_BASE_IDX 0
#define regRPB_ATS_CNTL 0x0090
#define regRPB_ATS_CNTL_BASE_IDX 0
#define regRPB_ATS_CNTL2 0x0091
#define regRPB_ATS_CNTL2_BASE_IDX 0
#define regRPB_PERFCOUNTER0_CFG 0x0092
#define regRPB_PERFCOUNTER0_CFG_BASE_IDX 0
#define regRPB_PERFCOUNTER1_CFG 0x0093
#define regRPB_PERFCOUNTER1_CFG_BASE_IDX 0
#define regRPB_PERFCOUNTER2_CFG 0x0094
#define regRPB_PERFCOUNTER2_CFG_BASE_IDX 0
#define regRPB_PERFCOUNTER3_CFG 0x0095
#define regRPB_PERFCOUNTER3_CFG_BASE_IDX 0
#define regRPB_PERFCOUNTER_RSLT_CNTL 0x0096
#define regRPB_PERFCOUNTER_RSLT_CNTL_BASE_IDX 0
#define regRPB_PERF_COUNTER_CNTL 0x0097
#define regRPB_PERF_COUNTER_CNTL_BASE_IDX 0
#define regRPB_PERFCOUNTER_HI 0x0098
#define regRPB_PERFCOUNTER_HI_BASE_IDX 0
#define regRPB_PERFCOUNTER_LO 0x0099
#define regRPB_PERFCOUNTER_LO_BASE_IDX 0
#define regRPB_PERF_COUNTER_STATUS 0x009a
#define regRPB_PERF_COUNTER_STATUS_BASE_IDX 0
#endif

File diff suppressed because it is too large.


@ -0,0 +1,219 @@
/*
* Copyright 2023 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef _hdp_7_0_0_OFFSET_HEADER
#define _hdp_7_0_0_OFFSET_HEADER
// addressBlock: hdp_hdpdec
// base address: 0x3c80
#define regHDP_MMHUB_TLVL 0x0008
#define regHDP_MMHUB_TLVL_BASE_IDX 0
#define regHDP_MMHUB_UNITID 0x0009
#define regHDP_MMHUB_UNITID_BASE_IDX 0
#define regHDP_NONSURFACE_BASE 0x0040
#define regHDP_NONSURFACE_BASE_BASE_IDX 0
#define regHDP_NONSURFACE_INFO 0x0041
#define regHDP_NONSURFACE_INFO_BASE_IDX 0
#define regHDP_NONSURFACE_BASE_HI 0x0042
#define regHDP_NONSURFACE_BASE_HI_BASE_IDX 0
#define regHDP_SURFACE_WRITE_FLAGS 0x00c4
#define regHDP_SURFACE_WRITE_FLAGS_BASE_IDX 0
#define regHDP_SURFACE_READ_FLAGS 0x00c5
#define regHDP_SURFACE_READ_FLAGS_BASE_IDX 0
#define regHDP_SURFACE_WRITE_FLAGS_CLR 0x00c6
#define regHDP_SURFACE_WRITE_FLAGS_CLR_BASE_IDX 0
#define regHDP_SURFACE_READ_FLAGS_CLR 0x00c7
#define regHDP_SURFACE_READ_FLAGS_CLR_BASE_IDX 0
#define regHDP_NONSURF_FLAGS 0x00c8
#define regHDP_NONSURF_FLAGS_BASE_IDX 0
#define regHDP_NONSURF_FLAGS_CLR 0x00c9
#define regHDP_NONSURF_FLAGS_CLR_BASE_IDX 0
#define regHDP_SW_SEMAPHORE 0x00cd
#define regHDP_SW_SEMAPHORE_BASE_IDX 0
#define regHDP_DEBUG0 0x00ce
#define regHDP_DEBUG0_BASE_IDX 0
#define regHDP_LAST_SURFACE_HIT 0x00d0
#define regHDP_LAST_SURFACE_HIT_BASE_IDX 0
#define regHDP_OUTSTANDING_REQ 0x00d1
#define regHDP_OUTSTANDING_REQ_BASE_IDX 0
#define regHDP_HOST_PATH_CNTL 0x00d2
#define regHDP_HOST_PATH_CNTL_BASE_IDX 0
#define regHDP_MISC_CNTL 0x00d3
#define regHDP_MISC_CNTL_BASE_IDX 0
#define regHDP_MEM_POWER_CTRL 0x00d4
#define regHDP_MEM_POWER_CTRL_BASE_IDX 0
#define regHDP_CLK_CNTL 0x00d5
#define regHDP_CLK_CNTL_BASE_IDX 0
#define regHDP_MMHUB_CNTL 0x00d6
#define regHDP_MMHUB_CNTL_BASE_IDX 0
#define regHDP_XDP_BUSY_STS 0x00d7
#define regHDP_XDP_BUSY_STS_BASE_IDX 0
#define regHDP_XDP_MMHUB_ERROR 0x00d8
#define regHDP_XDP_MMHUB_ERROR_BASE_IDX 0
#define regHDP_XDP_MMHUB_ERROR_CLR 0x00da
#define regHDP_XDP_MMHUB_ERROR_CLR_BASE_IDX 0
#define regHDP_VERSION 0x00db
#define regHDP_VERSION_BASE_IDX 0
#define regHDP_MEMIO_CNTL 0x00f6
#define regHDP_MEMIO_CNTL_BASE_IDX 0
#define regHDP_MEMIO_ADDR 0x00f7
#define regHDP_MEMIO_ADDR_BASE_IDX 0
#define regHDP_MEMIO_STATUS 0x00f8
#define regHDP_MEMIO_STATUS_BASE_IDX 0
#define regHDP_MEMIO_WR_DATA 0x00f9
#define regHDP_MEMIO_WR_DATA_BASE_IDX 0
#define regHDP_MEMIO_RD_DATA 0x00fa
#define regHDP_MEMIO_RD_DATA_BASE_IDX 0
#define regHDP_XDP_DIRECT2HDP_FIRST 0x0100
#define regHDP_XDP_DIRECT2HDP_FIRST_BASE_IDX 0
#define regHDP_XDP_D2H_FLUSH 0x0101
#define regHDP_XDP_D2H_FLUSH_BASE_IDX 0
#define regHDP_XDP_D2H_BAR_UPDATE 0x0102
#define regHDP_XDP_D2H_BAR_UPDATE_BASE_IDX 0
#define regHDP_XDP_D2H_RSVD_3 0x0103
#define regHDP_XDP_D2H_RSVD_3_BASE_IDX 0
#define regHDP_XDP_D2H_RSVD_4 0x0104
#define regHDP_XDP_D2H_RSVD_4_BASE_IDX 0
#define regHDP_XDP_D2H_RSVD_5 0x0105
#define regHDP_XDP_D2H_RSVD_5_BASE_IDX 0
#define regHDP_XDP_D2H_RSVD_6 0x0106
#define regHDP_XDP_D2H_RSVD_6_BASE_IDX 0
#define regHDP_XDP_D2H_RSVD_7 0x0107
#define regHDP_XDP_D2H_RSVD_7_BASE_IDX 0
#define regHDP_XDP_D2H_RSVD_8 0x0108
#define regHDP_XDP_D2H_RSVD_8_BASE_IDX 0
#define regHDP_XDP_D2H_RSVD_9 0x0109
#define regHDP_XDP_D2H_RSVD_9_BASE_IDX 0
#define regHDP_XDP_D2H_RSVD_10 0x010a
#define regHDP_XDP_D2H_RSVD_10_BASE_IDX 0
#define regHDP_XDP_D2H_RSVD_11 0x010b
#define regHDP_XDP_D2H_RSVD_11_BASE_IDX 0
#define regHDP_XDP_D2H_RSVD_12 0x010c
#define regHDP_XDP_D2H_RSVD_12_BASE_IDX 0
#define regHDP_XDP_D2H_RSVD_13 0x010d
#define regHDP_XDP_D2H_RSVD_13_BASE_IDX 0
#define regHDP_XDP_D2H_RSVD_14 0x010e
#define regHDP_XDP_D2H_RSVD_14_BASE_IDX 0
#define regHDP_XDP_D2H_RSVD_15 0x010f
#define regHDP_XDP_D2H_RSVD_15_BASE_IDX 0
#define regHDP_XDP_D2H_RSVD_16 0x0110
#define regHDP_XDP_D2H_RSVD_16_BASE_IDX 0
#define regHDP_XDP_D2H_RSVD_17 0x0111
#define regHDP_XDP_D2H_RSVD_17_BASE_IDX 0
#define regHDP_XDP_D2H_RSVD_18 0x0112
#define regHDP_XDP_D2H_RSVD_18_BASE_IDX 0
#define regHDP_XDP_D2H_RSVD_19 0x0113
#define regHDP_XDP_D2H_RSVD_19_BASE_IDX 0
#define regHDP_XDP_D2H_RSVD_20 0x0114
#define regHDP_XDP_D2H_RSVD_20_BASE_IDX 0
#define regHDP_XDP_D2H_RSVD_21 0x0115
#define regHDP_XDP_D2H_RSVD_21_BASE_IDX 0
#define regHDP_XDP_D2H_RSVD_22 0x0116
#define regHDP_XDP_D2H_RSVD_22_BASE_IDX 0
#define regHDP_XDP_D2H_RSVD_23 0x0117
#define regHDP_XDP_D2H_RSVD_23_BASE_IDX 0
#define regHDP_XDP_D2H_RSVD_24 0x0118
#define regHDP_XDP_D2H_RSVD_24_BASE_IDX 0
#define regHDP_XDP_D2H_RSVD_25 0x0119
#define regHDP_XDP_D2H_RSVD_25_BASE_IDX 0
#define regHDP_XDP_D2H_RSVD_26 0x011a
#define regHDP_XDP_D2H_RSVD_26_BASE_IDX 0
#define regHDP_XDP_D2H_RSVD_27 0x011b
#define regHDP_XDP_D2H_RSVD_27_BASE_IDX 0
#define regHDP_XDP_D2H_RSVD_28 0x011c
#define regHDP_XDP_D2H_RSVD_28_BASE_IDX 0
#define regHDP_XDP_D2H_RSVD_29 0x011d
#define regHDP_XDP_D2H_RSVD_29_BASE_IDX 0
#define regHDP_XDP_D2H_RSVD_30 0x011e
#define regHDP_XDP_D2H_RSVD_30_BASE_IDX 0
#define regHDP_XDP_D2H_RSVD_31 0x011f
#define regHDP_XDP_D2H_RSVD_31_BASE_IDX 0
#define regHDP_XDP_D2H_RSVD_32 0x0120
#define regHDP_XDP_D2H_RSVD_32_BASE_IDX 0
#define regHDP_XDP_D2H_RSVD_33 0x0121
#define regHDP_XDP_D2H_RSVD_33_BASE_IDX 0
#define regHDP_XDP_D2H_RSVD_34 0x0122
#define regHDP_XDP_D2H_RSVD_34_BASE_IDX 0
#define regHDP_XDP_DIRECT2HDP_LAST 0x0123
#define regHDP_XDP_DIRECT2HDP_LAST_BASE_IDX 0
#define regHDP_XDP_P2P_BAR_CFG 0x0124
#define regHDP_XDP_P2P_BAR_CFG_BASE_IDX 0
#define regHDP_XDP_P2P_MBX_OFFSET 0x0125
#define regHDP_XDP_P2P_MBX_OFFSET_BASE_IDX 0
#define regHDP_XDP_P2P_MBX_ADDR0 0x0126
#define regHDP_XDP_P2P_MBX_ADDR0_BASE_IDX 0
#define regHDP_XDP_P2P_MBX_ADDR1 0x0127
#define regHDP_XDP_P2P_MBX_ADDR1_BASE_IDX 0
#define regHDP_XDP_P2P_MBX_ADDR2 0x0128
#define regHDP_XDP_P2P_MBX_ADDR2_BASE_IDX 0
#define regHDP_XDP_P2P_MBX_ADDR3 0x0129
#define regHDP_XDP_P2P_MBX_ADDR3_BASE_IDX 0
#define regHDP_XDP_P2P_MBX_ADDR4 0x012a
#define regHDP_XDP_P2P_MBX_ADDR4_BASE_IDX 0
#define regHDP_XDP_P2P_MBX_ADDR5 0x012b
#define regHDP_XDP_P2P_MBX_ADDR5_BASE_IDX 0
#define regHDP_XDP_P2P_MBX_ADDR6 0x012c
#define regHDP_XDP_P2P_MBX_ADDR6_BASE_IDX 0
#define regHDP_XDP_HDP_MBX_MC_CFG 0x012d
#define regHDP_XDP_HDP_MBX_MC_CFG_BASE_IDX 0
#define regHDP_XDP_HDP_MC_CFG 0x012e
#define regHDP_XDP_HDP_MC_CFG_BASE_IDX 0
#define regHDP_XDP_HST_CFG 0x012f
#define regHDP_XDP_HST_CFG_BASE_IDX 0
#define regHDP_XDP_HDP_IPH_CFG 0x0131
#define regHDP_XDP_HDP_IPH_CFG_BASE_IDX 0
#define regHDP_XDP_P2P_BAR0 0x0134
#define regHDP_XDP_P2P_BAR0_BASE_IDX 0
#define regHDP_XDP_P2P_BAR1 0x0135
#define regHDP_XDP_P2P_BAR1_BASE_IDX 0
#define regHDP_XDP_P2P_BAR2 0x0136
#define regHDP_XDP_P2P_BAR2_BASE_IDX 0
#define regHDP_XDP_P2P_BAR3 0x0137
#define regHDP_XDP_P2P_BAR3_BASE_IDX 0
#define regHDP_XDP_P2P_BAR4 0x0138
#define regHDP_XDP_P2P_BAR4_BASE_IDX 0
#define regHDP_XDP_P2P_BAR5 0x0139
#define regHDP_XDP_P2P_BAR5_BASE_IDX 0
#define regHDP_XDP_P2P_BAR6 0x013a
#define regHDP_XDP_P2P_BAR6_BASE_IDX 0
#define regHDP_XDP_P2P_BAR7 0x013b
#define regHDP_XDP_P2P_BAR7_BASE_IDX 0
#define regHDP_XDP_FLUSH_ARMED_STS 0x013c
#define regHDP_XDP_FLUSH_ARMED_STS_BASE_IDX 0
#define regHDP_XDP_FLUSH_CNTR0_STS 0x013d
#define regHDP_XDP_FLUSH_CNTR0_STS_BASE_IDX 0
#define regHDP_XDP_STICKY 0x013f
#define regHDP_XDP_STICKY_BASE_IDX 0
#define regHDP_XDP_CHKN 0x0140
#define regHDP_XDP_CHKN_BASE_IDX 0
#define regHDP_XDP_BARS_ADDR_39_36 0x0144
#define regHDP_XDP_BARS_ADDR_39_36_BASE_IDX 0
#define regHDP_XDP_MC_VM_FB_LOCATION_BASE 0x0145
#define regHDP_XDP_MC_VM_FB_LOCATION_BASE_BASE_IDX 0
#define regHDP_XDP_GPU_IOV_VIOLATION_LOG 0x0148
#define regHDP_XDP_GPU_IOV_VIOLATION_LOG_BASE_IDX 0
#define regHDP_XDP_GPU_IOV_VIOLATION_LOG2 0x0149
#define regHDP_XDP_GPU_IOV_VIOLATION_LOG2_BASE_IDX 0
#endif


@ -0,0 +1,735 @@
/*
* Copyright 2023 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef _hdp_7_0_0_SH_MASK_HEADER
#define _hdp_7_0_0_SH_MASK_HEADER
// addressBlock: hdp_hdpdec
//HDP_MMHUB_TLVL
#define HDP_MMHUB_TLVL__HDP_WR_TLVL__SHIFT 0x0
#define HDP_MMHUB_TLVL__HDP_RD_TLVL__SHIFT 0x4
#define HDP_MMHUB_TLVL__XDP_WR_TLVL__SHIFT 0x8
#define HDP_MMHUB_TLVL__XDP_RD_TLVL__SHIFT 0xc
#define HDP_MMHUB_TLVL__XDP_MBX_WR_TLVL__SHIFT 0x10
#define HDP_MMHUB_TLVL__HDP_WR_TLVL_MASK 0x0000000FL
#define HDP_MMHUB_TLVL__HDP_RD_TLVL_MASK 0x000000F0L
#define HDP_MMHUB_TLVL__XDP_WR_TLVL_MASK 0x00000F00L
#define HDP_MMHUB_TLVL__XDP_RD_TLVL_MASK 0x0000F000L
#define HDP_MMHUB_TLVL__XDP_MBX_WR_TLVL_MASK 0x000F0000L
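/*
 * Editor's note -- illustrative sketch, not part of this patch: each field
 * in these headers pairs a __SHIFT with a _MASK, so a read-modify-write of,
 * say, the HDP write traffic level is plain mask-and-shift arithmetic
 * (amdgpu's REG_SET_FIELD()-style helpers wrap the same operation):
 */
static inline uint32_t hdp_mmhub_tlvl_with_wr_tlvl(uint32_t reg, uint32_t tlvl)
{
	reg &= ~HDP_MMHUB_TLVL__HDP_WR_TLVL_MASK;	/* clear the 4-bit field */
	reg |= (tlvl << HDP_MMHUB_TLVL__HDP_WR_TLVL__SHIFT) &
			HDP_MMHUB_TLVL__HDP_WR_TLVL_MASK;	/* insert the new value */
	return reg;
}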
//HDP_MMHUB_UNITID
#define HDP_MMHUB_UNITID__HDP_UNITID__SHIFT 0x0
#define HDP_MMHUB_UNITID__XDP_UNITID__SHIFT 0x8
#define HDP_MMHUB_UNITID__XDP_MBX_UNITID__SHIFT 0x10
#define HDP_MMHUB_UNITID__HDP_UNITID_MASK 0x0000003FL
#define HDP_MMHUB_UNITID__XDP_UNITID_MASK 0x00003F00L
#define HDP_MMHUB_UNITID__XDP_MBX_UNITID_MASK 0x003F0000L
//HDP_NONSURFACE_BASE
#define HDP_NONSURFACE_BASE__NONSURF_BASE_39_8__SHIFT 0x0
#define HDP_NONSURFACE_BASE__NONSURF_BASE_39_8_MASK 0xFFFFFFFFL
//HDP_NONSURFACE_INFO
#define HDP_NONSURFACE_INFO__NONSURF_SWAP__SHIFT 0x4
#define HDP_NONSURFACE_INFO__NONSURF_VMID__SHIFT 0x8
#define HDP_NONSURFACE_INFO__NONSURF_SWAP_MASK 0x00000030L
#define HDP_NONSURFACE_INFO__NONSURF_VMID_MASK 0x00000F00L
//HDP_NONSURFACE_BASE_HI
#define HDP_NONSURFACE_BASE_HI__NONSURF_BASE_47_40__SHIFT 0x0
#define HDP_NONSURFACE_BASE_HI__NONSURF_BASE_47_40_MASK 0x000000FFL
//HDP_SURFACE_WRITE_FLAGS
#define HDP_SURFACE_WRITE_FLAGS__SURF0_WRITE_FLAG__SHIFT 0x0
#define HDP_SURFACE_WRITE_FLAGS__SURF1_WRITE_FLAG__SHIFT 0x1
#define HDP_SURFACE_WRITE_FLAGS__SURF0_WRITE_FLAG_MASK 0x00000001L
#define HDP_SURFACE_WRITE_FLAGS__SURF1_WRITE_FLAG_MASK 0x00000002L
//HDP_SURFACE_READ_FLAGS
#define HDP_SURFACE_READ_FLAGS__SURF0_READ_FLAG__SHIFT 0x0
#define HDP_SURFACE_READ_FLAGS__SURF1_READ_FLAG__SHIFT 0x1
#define HDP_SURFACE_READ_FLAGS__SURF0_READ_FLAG_MASK 0x00000001L
#define HDP_SURFACE_READ_FLAGS__SURF1_READ_FLAG_MASK 0x00000002L
//HDP_SURFACE_WRITE_FLAGS_CLR
#define HDP_SURFACE_WRITE_FLAGS_CLR__SURF0_WRITE_FLAG_CLR__SHIFT 0x0
#define HDP_SURFACE_WRITE_FLAGS_CLR__SURF1_WRITE_FLAG_CLR__SHIFT 0x1
#define HDP_SURFACE_WRITE_FLAGS_CLR__SURF0_WRITE_FLAG_CLR_MASK 0x00000001L
#define HDP_SURFACE_WRITE_FLAGS_CLR__SURF1_WRITE_FLAG_CLR_MASK 0x00000002L
//HDP_SURFACE_READ_FLAGS_CLR
#define HDP_SURFACE_READ_FLAGS_CLR__SURF0_READ_FLAG_CLR__SHIFT 0x0
#define HDP_SURFACE_READ_FLAGS_CLR__SURF1_READ_FLAG_CLR__SHIFT 0x1
#define HDP_SURFACE_READ_FLAGS_CLR__SURF0_READ_FLAG_CLR_MASK 0x00000001L
#define HDP_SURFACE_READ_FLAGS_CLR__SURF1_READ_FLAG_CLR_MASK 0x00000002L
//HDP_NONSURF_FLAGS
#define HDP_NONSURF_FLAGS__NONSURF_WRITE_FLAG__SHIFT 0x0
#define HDP_NONSURF_FLAGS__NONSURF_READ_FLAG__SHIFT 0x1
#define HDP_NONSURF_FLAGS__NONSURF_WRITE_FLAG_MASK 0x00000001L
#define HDP_NONSURF_FLAGS__NONSURF_READ_FLAG_MASK 0x00000002L
//HDP_NONSURF_FLAGS_CLR
#define HDP_NONSURF_FLAGS_CLR__NONSURF_WRITE_FLAG_CLR__SHIFT 0x0
#define HDP_NONSURF_FLAGS_CLR__NONSURF_READ_FLAG_CLR__SHIFT 0x1
#define HDP_NONSURF_FLAGS_CLR__NONSURF_WRITE_FLAG_CLR_MASK 0x00000001L
#define HDP_NONSURF_FLAGS_CLR__NONSURF_READ_FLAG_CLR_MASK 0x00000002L
//HDP_SW_SEMAPHORE
#define HDP_SW_SEMAPHORE__SW_SEMAPHORE__SHIFT 0x0
#define HDP_SW_SEMAPHORE__SW_SEMAPHORE_MASK 0xFFFFFFFFL
//HDP_DEBUG0
#define HDP_DEBUG0__HDP_DEBUG__SHIFT 0x0
#define HDP_DEBUG0__HDP_DEBUG_MASK 0xFFFFFFFFL
//HDP_LAST_SURFACE_HIT
#define HDP_LAST_SURFACE_HIT__LAST_SURFACE_HIT__SHIFT 0x0
#define HDP_LAST_SURFACE_HIT__LAST_SURFACE_HIT_MASK 0x00000003L
//HDP_OUTSTANDING_REQ
#define HDP_OUTSTANDING_REQ__WRITE_REQ__SHIFT 0x0
#define HDP_OUTSTANDING_REQ__READ_REQ__SHIFT 0x8
#define HDP_OUTSTANDING_REQ__WRITE_REQ_MASK 0x000000FFL
#define HDP_OUTSTANDING_REQ__READ_REQ_MASK 0x0000FF00L
//HDP_HOST_PATH_CNTL
#define HDP_HOST_PATH_CNTL__WR_STALL_TIMER__SHIFT 0x9
#define HDP_HOST_PATH_CNTL__RD_STALL_TIMER__SHIFT 0xb
#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_TIMER_PRELOAD_CFG__SHIFT 0x12
#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_TIMER__SHIFT 0x13
#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_EN__SHIFT 0x15
#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_64B_EN__SHIFT 0x16
#define HDP_HOST_PATH_CNTL__ALL_SURFACES_DIS__SHIFT 0x1d
#define HDP_HOST_PATH_CNTL__WR_STALL_TIMER_MASK 0x00000600L
#define HDP_HOST_PATH_CNTL__RD_STALL_TIMER_MASK 0x00001800L
#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_TIMER_PRELOAD_CFG_MASK 0x00040000L
#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_TIMER_MASK 0x00180000L
#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_EN_MASK 0x00200000L
#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_64B_EN_MASK 0x00400000L
#define HDP_HOST_PATH_CNTL__ALL_SURFACES_DIS_MASK 0x20000000L
//HDP_MISC_CNTL
#define HDP_MISC_CNTL__IDLE_HYSTERESIS_CNTL__SHIFT 0x2
#define HDP_MISC_CNTL__OUTSTANDING_WRITE_COUNT_1024__SHIFT 0x5
#define HDP_MISC_CNTL__MMHUB_EARLY_WRACK_ENABLE__SHIFT 0x8
#define HDP_MISC_CNTL__EARLY_WRACK_MISSING_PROTECT_ENABLE__SHIFT 0x9
#define HDP_MISC_CNTL__SIMULTANEOUS_READS_WRITES__SHIFT 0xb
#define HDP_MISC_CNTL__READ_BUFFER_WATERMARK__SHIFT 0xe
#define HDP_MISC_CNTL__NACK_ENABLE__SHIFT 0x13
#define HDP_MISC_CNTL__ATOMIC_NACK_ENABLE__SHIFT 0x14
#define HDP_MISC_CNTL__FED_ENABLE__SHIFT 0x15
#define HDP_MISC_CNTL__ATOMIC_FED_ENABLE__SHIFT 0x16
#define HDP_MISC_CNTL__SYSHUB_CHANNEL_PRIORITY__SHIFT 0x17
#define HDP_MISC_CNTL__MMHUB_WRBURST_ENABLE__SHIFT 0x18
#define HDP_MISC_CNTL__MMHUB_WRBURST_SIZE__SHIFT 0x1e
#define HDP_MISC_CNTL__IDLE_HYSTERESIS_CNTL_MASK 0x0000000CL
#define HDP_MISC_CNTL__OUTSTANDING_WRITE_COUNT_1024_MASK 0x00000020L
#define HDP_MISC_CNTL__MMHUB_EARLY_WRACK_ENABLE_MASK 0x00000100L
#define HDP_MISC_CNTL__EARLY_WRACK_MISSING_PROTECT_ENABLE_MASK 0x00000200L
#define HDP_MISC_CNTL__SIMULTANEOUS_READS_WRITES_MASK 0x00000800L
#define HDP_MISC_CNTL__READ_BUFFER_WATERMARK_MASK 0x0000C000L
#define HDP_MISC_CNTL__NACK_ENABLE_MASK 0x00080000L
#define HDP_MISC_CNTL__ATOMIC_NACK_ENABLE_MASK 0x00100000L
#define HDP_MISC_CNTL__FED_ENABLE_MASK 0x00200000L
#define HDP_MISC_CNTL__ATOMIC_FED_ENABLE_MASK 0x00400000L
#define HDP_MISC_CNTL__SYSHUB_CHANNEL_PRIORITY_MASK 0x00800000L
#define HDP_MISC_CNTL__MMHUB_WRBURST_ENABLE_MASK 0x01000000L
#define HDP_MISC_CNTL__MMHUB_WRBURST_SIZE_MASK 0x40000000L
//HDP_MEM_POWER_CTRL
#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_CTRL_EN__SHIFT 0x0
#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_LS_EN__SHIFT 0x1
#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_DS_EN__SHIFT 0x2
#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_SD_EN__SHIFT 0x3
#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_IDLE_HYSTERESIS__SHIFT 0x4
#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_UP_RECOVER_DELAY__SHIFT 0x8
#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_DOWN_ENTER_DELAY__SHIFT 0xe
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN__SHIFT 0x10
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN__SHIFT 0x11
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_DS_EN__SHIFT 0x12
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_SD_EN__SHIFT 0x13
#define HDP_MEM_POWER_CTRL__RC_MEM_IDLE_HYSTERESIS__SHIFT 0x14
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_UP_RECOVER_DELAY__SHIFT 0x18
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_DOWN_ENTER_DELAY__SHIFT 0x1e
#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_CTRL_EN_MASK 0x00000001L
#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_LS_EN_MASK 0x00000002L
#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_DS_EN_MASK 0x00000004L
#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_SD_EN_MASK 0x00000008L
#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_IDLE_HYSTERESIS_MASK 0x00000070L
#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_UP_RECOVER_DELAY_MASK 0x00003F00L
#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_DOWN_ENTER_DELAY_MASK 0x0000C000L
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK 0x00010000L
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK 0x00020000L
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_DS_EN_MASK 0x00040000L
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_SD_EN_MASK 0x00080000L
#define HDP_MEM_POWER_CTRL__RC_MEM_IDLE_HYSTERESIS_MASK 0x00700000L
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_UP_RECOVER_DELAY_MASK 0x3F000000L
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_DOWN_ENTER_DELAY_MASK 0xC0000000L
//HDP_CLK_CNTL
#define HDP_CLK_CNTL__REG_CLK_ENABLE_COUNT__SHIFT 0x0
#define HDP_CLK_CNTL__ATOMIC_MEM_CLK_SOFT_OVERRIDE__SHIFT 0x1a
#define HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE__SHIFT 0x1b
#define HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE__SHIFT 0x1c
#define HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE__SHIFT 0x1d
#define HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE__SHIFT 0x1e
#define HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE__SHIFT 0x1f
#define HDP_CLK_CNTL__REG_CLK_ENABLE_COUNT_MASK 0x0000000FL
#define HDP_CLK_CNTL__ATOMIC_MEM_CLK_SOFT_OVERRIDE_MASK 0x04000000L
#define HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK 0x08000000L
#define HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK 0x10000000L
#define HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK 0x20000000L
#define HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK 0x40000000L
#define HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK 0x80000000L
//HDP_MMHUB_CNTL
#define HDP_MMHUB_CNTL__HDP_MMHUB_RO__SHIFT 0x0
#define HDP_MMHUB_CNTL__HDP_MMHUB_GCC__SHIFT 0x1
#define HDP_MMHUB_CNTL__HDP_MMHUB_SNOOP__SHIFT 0x2
#define HDP_MMHUB_CNTL__HDP_MMHUB_RO_OVERRIDE__SHIFT 0x4
#define HDP_MMHUB_CNTL__HDP_MMHUB_GCC_OVERRIDE__SHIFT 0x5
#define HDP_MMHUB_CNTL__HDP_MMHUB_SNOOP_OVERRIDE__SHIFT 0x6
#define HDP_MMHUB_CNTL__HDP_MMHUB_RO_MASK 0x00000001L
#define HDP_MMHUB_CNTL__HDP_MMHUB_GCC_MASK 0x00000002L
#define HDP_MMHUB_CNTL__HDP_MMHUB_SNOOP_MASK 0x00000004L
#define HDP_MMHUB_CNTL__HDP_MMHUB_RO_OVERRIDE_MASK 0x00000010L
#define HDP_MMHUB_CNTL__HDP_MMHUB_GCC_OVERRIDE_MASK 0x00000020L
#define HDP_MMHUB_CNTL__HDP_MMHUB_SNOOP_OVERRIDE_MASK 0x00000040L
//HDP_XDP_BUSY_STS
#define HDP_XDP_BUSY_STS__BUSY_BITS_0__SHIFT 0x0
#define HDP_XDP_BUSY_STS__BUSY_BITS_1__SHIFT 0x1
#define HDP_XDP_BUSY_STS__BUSY_BITS_2__SHIFT 0x2
#define HDP_XDP_BUSY_STS__BUSY_BITS_3__SHIFT 0x3
#define HDP_XDP_BUSY_STS__BUSY_BITS_4__SHIFT 0x4
#define HDP_XDP_BUSY_STS__BUSY_BITS_5__SHIFT 0x5
#define HDP_XDP_BUSY_STS__BUSY_BITS_6__SHIFT 0x6
#define HDP_XDP_BUSY_STS__BUSY_BITS_7__SHIFT 0x7
#define HDP_XDP_BUSY_STS__BUSY_BITS_8__SHIFT 0x8
#define HDP_XDP_BUSY_STS__BUSY_BITS_9__SHIFT 0x9
#define HDP_XDP_BUSY_STS__BUSY_BITS_10__SHIFT 0xa
#define HDP_XDP_BUSY_STS__BUSY_BITS_11__SHIFT 0xb
#define HDP_XDP_BUSY_STS__BUSY_BITS_12__SHIFT 0xc
#define HDP_XDP_BUSY_STS__BUSY_BITS_13__SHIFT 0xd
#define HDP_XDP_BUSY_STS__BUSY_BITS_14__SHIFT 0xe
#define HDP_XDP_BUSY_STS__BUSY_BITS_15__SHIFT 0xf
#define HDP_XDP_BUSY_STS__BUSY_BITS_16__SHIFT 0x10
#define HDP_XDP_BUSY_STS__BUSY_BITS_17__SHIFT 0x11
#define HDP_XDP_BUSY_STS__BUSY_BITS_18__SHIFT 0x12
#define HDP_XDP_BUSY_STS__BUSY_BITS_19__SHIFT 0x13
#define HDP_XDP_BUSY_STS__BUSY_BITS_20__SHIFT 0x14
#define HDP_XDP_BUSY_STS__BUSY_BITS_21__SHIFT 0x15
#define HDP_XDP_BUSY_STS__BUSY_BITS_22__SHIFT 0x16
#define HDP_XDP_BUSY_STS__BUSY_BITS_23__SHIFT 0x17
#define HDP_XDP_BUSY_STS__Z_FENCE_BIT__SHIFT 0x18
#define HDP_XDP_BUSY_STS__BUSY_BITS_0_MASK 0x00000001L
#define HDP_XDP_BUSY_STS__BUSY_BITS_1_MASK 0x00000002L
#define HDP_XDP_BUSY_STS__BUSY_BITS_2_MASK 0x00000004L
#define HDP_XDP_BUSY_STS__BUSY_BITS_3_MASK 0x00000008L
#define HDP_XDP_BUSY_STS__BUSY_BITS_4_MASK 0x00000010L
#define HDP_XDP_BUSY_STS__BUSY_BITS_5_MASK 0x00000020L
#define HDP_XDP_BUSY_STS__BUSY_BITS_6_MASK 0x00000040L
#define HDP_XDP_BUSY_STS__BUSY_BITS_7_MASK 0x00000080L
#define HDP_XDP_BUSY_STS__BUSY_BITS_8_MASK 0x00000100L
#define HDP_XDP_BUSY_STS__BUSY_BITS_9_MASK 0x00000200L
#define HDP_XDP_BUSY_STS__BUSY_BITS_10_MASK 0x00000400L
#define HDP_XDP_BUSY_STS__BUSY_BITS_11_MASK 0x00000800L
#define HDP_XDP_BUSY_STS__BUSY_BITS_12_MASK 0x00001000L
#define HDP_XDP_BUSY_STS__BUSY_BITS_13_MASK 0x00002000L
#define HDP_XDP_BUSY_STS__BUSY_BITS_14_MASK 0x00004000L
#define HDP_XDP_BUSY_STS__BUSY_BITS_15_MASK 0x00008000L
#define HDP_XDP_BUSY_STS__BUSY_BITS_16_MASK 0x00010000L
#define HDP_XDP_BUSY_STS__BUSY_BITS_17_MASK 0x00020000L
#define HDP_XDP_BUSY_STS__BUSY_BITS_18_MASK 0x00040000L
#define HDP_XDP_BUSY_STS__BUSY_BITS_19_MASK 0x00080000L
#define HDP_XDP_BUSY_STS__BUSY_BITS_20_MASK 0x00100000L
#define HDP_XDP_BUSY_STS__BUSY_BITS_21_MASK 0x00200000L
#define HDP_XDP_BUSY_STS__BUSY_BITS_22_MASK 0x00400000L
#define HDP_XDP_BUSY_STS__BUSY_BITS_23_MASK 0x00800000L
#define HDP_XDP_BUSY_STS__Z_FENCE_BIT_MASK 0x01000000L
//HDP_XDP_MMHUB_ERROR
#define HDP_XDP_MMHUB_ERROR__HDP_BRESP_01__SHIFT 0x1
#define HDP_XDP_MMHUB_ERROR__HDP_BRESP_10__SHIFT 0x2
#define HDP_XDP_MMHUB_ERROR__HDP_BRESP_11__SHIFT 0x3
#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_NACK_01__SHIFT 0x5
#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_NACK_10__SHIFT 0x6
#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_NACK_11__SHIFT 0x7
#define HDP_XDP_MMHUB_ERROR__HDP_RRESP_01__SHIFT 0x9
#define HDP_XDP_MMHUB_ERROR__HDP_RRESP_10__SHIFT 0xa
#define HDP_XDP_MMHUB_ERROR__HDP_RRESP_11__SHIFT 0xb
#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_NACK_01__SHIFT 0xd
#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_NACK_10__SHIFT 0xe
#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_NACK_11__SHIFT 0xf
#define HDP_XDP_MMHUB_ERROR__XDP_BRESP_01__SHIFT 0x11
#define HDP_XDP_MMHUB_ERROR__XDP_BRESP_10__SHIFT 0x12
#define HDP_XDP_MMHUB_ERROR__XDP_BRESP_11__SHIFT 0x13
#define HDP_XDP_MMHUB_ERROR__XDP_BUSER_NACK_01__SHIFT 0x15
#define HDP_XDP_MMHUB_ERROR__XDP_BUSER_NACK_10__SHIFT 0x16
#define HDP_XDP_MMHUB_ERROR__XDP_BUSER_NACK_11__SHIFT 0x17
#define HDP_XDP_MMHUB_ERROR__HDP_BRESP_01_MASK 0x00000002L
#define HDP_XDP_MMHUB_ERROR__HDP_BRESP_10_MASK 0x00000004L
#define HDP_XDP_MMHUB_ERROR__HDP_BRESP_11_MASK 0x00000008L
#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_NACK_01_MASK 0x00000020L
#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_NACK_10_MASK 0x00000040L
#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_NACK_11_MASK 0x00000080L
#define HDP_XDP_MMHUB_ERROR__HDP_RRESP_01_MASK 0x00000200L
#define HDP_XDP_MMHUB_ERROR__HDP_RRESP_10_MASK 0x00000400L
#define HDP_XDP_MMHUB_ERROR__HDP_RRESP_11_MASK 0x00000800L
#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_NACK_01_MASK 0x00002000L
#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_NACK_10_MASK 0x00004000L
#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_NACK_11_MASK 0x00008000L
#define HDP_XDP_MMHUB_ERROR__XDP_BRESP_01_MASK 0x00020000L
#define HDP_XDP_MMHUB_ERROR__XDP_BRESP_10_MASK 0x00040000L
#define HDP_XDP_MMHUB_ERROR__XDP_BRESP_11_MASK 0x00080000L
#define HDP_XDP_MMHUB_ERROR__XDP_BUSER_NACK_01_MASK 0x00200000L
#define HDP_XDP_MMHUB_ERROR__XDP_BUSER_NACK_10_MASK 0x00400000L
#define HDP_XDP_MMHUB_ERROR__XDP_BUSER_NACK_11_MASK 0x00800000L
//HDP_XDP_MMHUB_ERROR_CLR
#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BRESP_01_CLR__SHIFT 0x1
#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BRESP_10_CLR__SHIFT 0x2
#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BRESP_11_CLR__SHIFT 0x3
#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BUSER_FED_CLR__SHIFT 0x4
#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BUSER_NACK_01_CLR__SHIFT 0x5
#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BUSER_NACK_10_CLR__SHIFT 0x6
#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BUSER_NACK_11_CLR__SHIFT 0x7
#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RRESP_01_CLR__SHIFT 0x9
#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RRESP_10_CLR__SHIFT 0xa
#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RRESP_11_CLR__SHIFT 0xb
#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RUSER_FED_CLR__SHIFT 0xc
#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RUSER_NACK_01_CLR__SHIFT 0xd
#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RUSER_NACK_10_CLR__SHIFT 0xe
#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RUSER_NACK_11_CLR__SHIFT 0xf
#define HDP_XDP_MMHUB_ERROR_CLR__HDP_WUSER_FED_CLR__SHIFT 0x10
#define HDP_XDP_MMHUB_ERROR_CLR__XDP_BRESP_01_CLR__SHIFT 0x11
#define HDP_XDP_MMHUB_ERROR_CLR__XDP_BRESP_10_CLR__SHIFT 0x12
#define HDP_XDP_MMHUB_ERROR_CLR__XDP_BRESP_11_CLR__SHIFT 0x13
#define HDP_XDP_MMHUB_ERROR_CLR__XDP_BUSER_NACK_01_CLR__SHIFT 0x15
#define HDP_XDP_MMHUB_ERROR_CLR__XDP_BUSER_NACK_10_CLR__SHIFT 0x16
#define HDP_XDP_MMHUB_ERROR_CLR__XDP_BUSER_NACK_11_CLR__SHIFT 0x17
#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BRESP_01_CLR_MASK 0x00000002L
#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BRESP_10_CLR_MASK 0x00000004L
#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BRESP_11_CLR_MASK 0x00000008L
#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BUSER_FED_CLR_MASK 0x00000010L
#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BUSER_NACK_01_CLR_MASK 0x00000020L
#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BUSER_NACK_10_CLR_MASK 0x00000040L
#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BUSER_NACK_11_CLR_MASK 0x00000080L
#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RRESP_01_CLR_MASK 0x00000200L
#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RRESP_10_CLR_MASK 0x00000400L
#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RRESP_11_CLR_MASK 0x00000800L
#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RUSER_FED_CLR_MASK 0x00001000L
#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RUSER_NACK_01_CLR_MASK 0x00002000L
#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RUSER_NACK_10_CLR_MASK 0x00004000L
#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RUSER_NACK_11_CLR_MASK 0x00008000L
#define HDP_XDP_MMHUB_ERROR_CLR__HDP_WUSER_FED_CLR_MASK 0x00010000L
#define HDP_XDP_MMHUB_ERROR_CLR__XDP_BRESP_01_CLR_MASK 0x00020000L
#define HDP_XDP_MMHUB_ERROR_CLR__XDP_BRESP_10_CLR_MASK 0x00040000L
#define HDP_XDP_MMHUB_ERROR_CLR__XDP_BRESP_11_CLR_MASK 0x00080000L
#define HDP_XDP_MMHUB_ERROR_CLR__XDP_BUSER_NACK_01_CLR_MASK 0x00200000L
#define HDP_XDP_MMHUB_ERROR_CLR__XDP_BUSER_NACK_10_CLR_MASK 0x00400000L
#define HDP_XDP_MMHUB_ERROR_CLR__XDP_BUSER_NACK_11_CLR_MASK 0x00800000L
//HDP_VERSION
#define HDP_VERSION__MINVER__SHIFT 0x0
#define HDP_VERSION__MAJVER__SHIFT 0x8
#define HDP_VERSION__REV__SHIFT 0x10
#define HDP_VERSION__MINVER_MASK 0x000000FFL
#define HDP_VERSION__MAJVER_MASK 0x0000FF00L
#define HDP_VERSION__REV_MASK 0x00FF0000L
//HDP_MEMIO_CNTL
#define HDP_MEMIO_CNTL__MEMIO_SEND__SHIFT 0x0
#define HDP_MEMIO_CNTL__MEMIO_OP__SHIFT 0x1
#define HDP_MEMIO_CNTL__MEMIO_BE__SHIFT 0x2
#define HDP_MEMIO_CNTL__MEMIO_WR_STROBE__SHIFT 0x6
#define HDP_MEMIO_CNTL__MEMIO_RD_STROBE__SHIFT 0x7
#define HDP_MEMIO_CNTL__MEMIO_ADDR_UPPER__SHIFT 0x8
#define HDP_MEMIO_CNTL__MEMIO_CLR_WR_ERROR__SHIFT 0xe
#define HDP_MEMIO_CNTL__MEMIO_CLR_RD_ERROR__SHIFT 0xf
#define HDP_MEMIO_CNTL__MEMIO_VF__SHIFT 0x10
#define HDP_MEMIO_CNTL__MEMIO_VFID__SHIFT 0x11
#define HDP_MEMIO_CNTL__MEMIO_SEND_MASK 0x00000001L
#define HDP_MEMIO_CNTL__MEMIO_OP_MASK 0x00000002L
#define HDP_MEMIO_CNTL__MEMIO_BE_MASK 0x0000003CL
#define HDP_MEMIO_CNTL__MEMIO_WR_STROBE_MASK 0x00000040L
#define HDP_MEMIO_CNTL__MEMIO_RD_STROBE_MASK 0x00000080L
#define HDP_MEMIO_CNTL__MEMIO_ADDR_UPPER_MASK 0x00003F00L
#define HDP_MEMIO_CNTL__MEMIO_CLR_WR_ERROR_MASK 0x00004000L
#define HDP_MEMIO_CNTL__MEMIO_CLR_RD_ERROR_MASK 0x00008000L
#define HDP_MEMIO_CNTL__MEMIO_VF_MASK 0x00010000L
#define HDP_MEMIO_CNTL__MEMIO_VFID_MASK 0x003E0000L
//HDP_MEMIO_ADDR
#define HDP_MEMIO_ADDR__MEMIO_ADDR_LOWER__SHIFT 0x0
#define HDP_MEMIO_ADDR__MEMIO_ADDR_LOWER_MASK 0xFFFFFFFFL
//HDP_MEMIO_STATUS
#define HDP_MEMIO_STATUS__MEMIO_WR_STATUS__SHIFT 0x0
#define HDP_MEMIO_STATUS__MEMIO_RD_STATUS__SHIFT 0x1
#define HDP_MEMIO_STATUS__MEMIO_WR_ERROR__SHIFT 0x2
#define HDP_MEMIO_STATUS__MEMIO_RD_ERROR__SHIFT 0x3
#define HDP_MEMIO_STATUS__MEMIO_WR_STATUS_MASK 0x00000001L
#define HDP_MEMIO_STATUS__MEMIO_RD_STATUS_MASK 0x00000002L
#define HDP_MEMIO_STATUS__MEMIO_WR_ERROR_MASK 0x00000004L
#define HDP_MEMIO_STATUS__MEMIO_RD_ERROR_MASK 0x00000008L
//HDP_MEMIO_WR_DATA
#define HDP_MEMIO_WR_DATA__MEMIO_WR_DATA__SHIFT 0x0
#define HDP_MEMIO_WR_DATA__MEMIO_WR_DATA_MASK 0xFFFFFFFFL
//HDP_MEMIO_RD_DATA
#define HDP_MEMIO_RD_DATA__MEMIO_RD_DATA__SHIFT 0x0
#define HDP_MEMIO_RD_DATA__MEMIO_RD_DATA_MASK 0xFFFFFFFFL
//HDP_XDP_DIRECT2HDP_FIRST
#define HDP_XDP_DIRECT2HDP_FIRST__RESERVED__SHIFT 0x0
#define HDP_XDP_DIRECT2HDP_FIRST__RESERVED_MASK 0xFFFFFFFFL
//HDP_XDP_D2H_FLUSH
#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_FLUSH_NUM__SHIFT 0x0
#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_MBX_ENC_DATA__SHIFT 0x4
#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_MBX_ADDR_SEL__SHIFT 0x8
#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_XPB_CLG__SHIFT 0xb
#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_SEND_HOST__SHIFT 0x10
#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_ALTER_FLUSH_NUM__SHIFT 0x12
#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_RSVD_0__SHIFT 0x13
#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_RSVD_1__SHIFT 0x14
#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_FLUSH_NUM_MASK 0x0000000FL
#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_MBX_ENC_DATA_MASK 0x000000F0L
#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_MBX_ADDR_SEL_MASK 0x00000700L
#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_XPB_CLG_MASK 0x0000F800L
#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_SEND_HOST_MASK 0x00010000L
#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_ALTER_FLUSH_NUM_MASK 0x00040000L
#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_RSVD_0_MASK 0x00080000L
#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_RSVD_1_MASK 0x00100000L
//HDP_XDP_D2H_BAR_UPDATE
#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_ADDR__SHIFT 0x0
#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_FLUSH_NUM__SHIFT 0x10
#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_BAR_NUM__SHIFT 0x14
#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_ADDR_MASK 0x0000FFFFL
#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_FLUSH_NUM_MASK 0x000F0000L
#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_BAR_NUM_MASK 0x00700000L
//HDP_XDP_D2H_RSVD_3
#define HDP_XDP_D2H_RSVD_3__RESERVED__SHIFT 0x0
#define HDP_XDP_D2H_RSVD_3__RESERVED_MASK 0xFFFFFFFFL
//HDP_XDP_D2H_RSVD_4
#define HDP_XDP_D2H_RSVD_4__RESERVED__SHIFT 0x0
#define HDP_XDP_D2H_RSVD_4__RESERVED_MASK 0xFFFFFFFFL
//HDP_XDP_D2H_RSVD_5
#define HDP_XDP_D2H_RSVD_5__RESERVED__SHIFT 0x0
#define HDP_XDP_D2H_RSVD_5__RESERVED_MASK 0xFFFFFFFFL
//HDP_XDP_D2H_RSVD_6
#define HDP_XDP_D2H_RSVD_6__RESERVED__SHIFT 0x0
#define HDP_XDP_D2H_RSVD_6__RESERVED_MASK 0xFFFFFFFFL
//HDP_XDP_D2H_RSVD_7
#define HDP_XDP_D2H_RSVD_7__RESERVED__SHIFT 0x0
#define HDP_XDP_D2H_RSVD_7__RESERVED_MASK 0xFFFFFFFFL
//HDP_XDP_D2H_RSVD_8
#define HDP_XDP_D2H_RSVD_8__RESERVED__SHIFT 0x0
#define HDP_XDP_D2H_RSVD_8__RESERVED_MASK 0xFFFFFFFFL
//HDP_XDP_D2H_RSVD_9
#define HDP_XDP_D2H_RSVD_9__RESERVED__SHIFT 0x0
#define HDP_XDP_D2H_RSVD_9__RESERVED_MASK 0xFFFFFFFFL
//HDP_XDP_D2H_RSVD_10
#define HDP_XDP_D2H_RSVD_10__RESERVED__SHIFT 0x0
#define HDP_XDP_D2H_RSVD_10__RESERVED_MASK 0xFFFFFFFFL
//HDP_XDP_D2H_RSVD_11
#define HDP_XDP_D2H_RSVD_11__RESERVED__SHIFT 0x0
#define HDP_XDP_D2H_RSVD_11__RESERVED_MASK 0xFFFFFFFFL
//HDP_XDP_D2H_RSVD_12
#define HDP_XDP_D2H_RSVD_12__RESERVED__SHIFT 0x0
#define HDP_XDP_D2H_RSVD_12__RESERVED_MASK 0xFFFFFFFFL
//HDP_XDP_D2H_RSVD_13
#define HDP_XDP_D2H_RSVD_13__RESERVED__SHIFT 0x0
#define HDP_XDP_D2H_RSVD_13__RESERVED_MASK 0xFFFFFFFFL
//HDP_XDP_D2H_RSVD_14
#define HDP_XDP_D2H_RSVD_14__RESERVED__SHIFT 0x0
#define HDP_XDP_D2H_RSVD_14__RESERVED_MASK 0xFFFFFFFFL
//HDP_XDP_D2H_RSVD_15
#define HDP_XDP_D2H_RSVD_15__RESERVED__SHIFT 0x0
#define HDP_XDP_D2H_RSVD_15__RESERVED_MASK 0xFFFFFFFFL
//HDP_XDP_D2H_RSVD_16
#define HDP_XDP_D2H_RSVD_16__RESERVED__SHIFT 0x0
#define HDP_XDP_D2H_RSVD_16__RESERVED_MASK 0xFFFFFFFFL
//HDP_XDP_D2H_RSVD_17
#define HDP_XDP_D2H_RSVD_17__RESERVED__SHIFT 0x0
#define HDP_XDP_D2H_RSVD_17__RESERVED_MASK 0xFFFFFFFFL
//HDP_XDP_D2H_RSVD_18
#define HDP_XDP_D2H_RSVD_18__RESERVED__SHIFT 0x0
#define HDP_XDP_D2H_RSVD_18__RESERVED_MASK 0xFFFFFFFFL
//HDP_XDP_D2H_RSVD_19
#define HDP_XDP_D2H_RSVD_19__RESERVED__SHIFT 0x0
#define HDP_XDP_D2H_RSVD_19__RESERVED_MASK 0xFFFFFFFFL
//HDP_XDP_D2H_RSVD_20
#define HDP_XDP_D2H_RSVD_20__RESERVED__SHIFT 0x0
#define HDP_XDP_D2H_RSVD_20__RESERVED_MASK 0xFFFFFFFFL
//HDP_XDP_D2H_RSVD_21
#define HDP_XDP_D2H_RSVD_21__RESERVED__SHIFT 0x0
#define HDP_XDP_D2H_RSVD_21__RESERVED_MASK 0xFFFFFFFFL
//HDP_XDP_D2H_RSVD_22
#define HDP_XDP_D2H_RSVD_22__RESERVED__SHIFT 0x0
#define HDP_XDP_D2H_RSVD_22__RESERVED_MASK 0xFFFFFFFFL
//HDP_XDP_D2H_RSVD_23
#define HDP_XDP_D2H_RSVD_23__RESERVED__SHIFT 0x0
#define HDP_XDP_D2H_RSVD_23__RESERVED_MASK 0xFFFFFFFFL
//HDP_XDP_D2H_RSVD_24
#define HDP_XDP_D2H_RSVD_24__RESERVED__SHIFT 0x0
#define HDP_XDP_D2H_RSVD_24__RESERVED_MASK 0xFFFFFFFFL
//HDP_XDP_D2H_RSVD_25
#define HDP_XDP_D2H_RSVD_25__RESERVED__SHIFT 0x0
#define HDP_XDP_D2H_RSVD_25__RESERVED_MASK 0xFFFFFFFFL
//HDP_XDP_D2H_RSVD_26
#define HDP_XDP_D2H_RSVD_26__RESERVED__SHIFT 0x0
#define HDP_XDP_D2H_RSVD_26__RESERVED_MASK 0xFFFFFFFFL
//HDP_XDP_D2H_RSVD_27
#define HDP_XDP_D2H_RSVD_27__RESERVED__SHIFT 0x0
#define HDP_XDP_D2H_RSVD_27__RESERVED_MASK 0xFFFFFFFFL
//HDP_XDP_D2H_RSVD_28
#define HDP_XDP_D2H_RSVD_28__RESERVED__SHIFT 0x0
#define HDP_XDP_D2H_RSVD_28__RESERVED_MASK 0xFFFFFFFFL
//HDP_XDP_D2H_RSVD_29
#define HDP_XDP_D2H_RSVD_29__RESERVED__SHIFT 0x0
#define HDP_XDP_D2H_RSVD_29__RESERVED_MASK 0xFFFFFFFFL
//HDP_XDP_D2H_RSVD_30
#define HDP_XDP_D2H_RSVD_30__RESERVED__SHIFT 0x0
#define HDP_XDP_D2H_RSVD_30__RESERVED_MASK 0xFFFFFFFFL
//HDP_XDP_D2H_RSVD_31
#define HDP_XDP_D2H_RSVD_31__RESERVED__SHIFT 0x0
#define HDP_XDP_D2H_RSVD_31__RESERVED_MASK 0xFFFFFFFFL
//HDP_XDP_D2H_RSVD_32
#define HDP_XDP_D2H_RSVD_32__RESERVED__SHIFT 0x0
#define HDP_XDP_D2H_RSVD_32__RESERVED_MASK 0xFFFFFFFFL
//HDP_XDP_D2H_RSVD_33
#define HDP_XDP_D2H_RSVD_33__RESERVED__SHIFT 0x0
#define HDP_XDP_D2H_RSVD_33__RESERVED_MASK 0xFFFFFFFFL
//HDP_XDP_D2H_RSVD_34
#define HDP_XDP_D2H_RSVD_34__RESERVED__SHIFT 0x0
#define HDP_XDP_D2H_RSVD_34__RESERVED_MASK 0xFFFFFFFFL
//HDP_XDP_DIRECT2HDP_LAST
#define HDP_XDP_DIRECT2HDP_LAST__RESERVED__SHIFT 0x0
#define HDP_XDP_DIRECT2HDP_LAST__RESERVED_MASK 0xFFFFFFFFL
//HDP_XDP_P2P_BAR_CFG
#define HDP_XDP_P2P_BAR_CFG__P2P_BAR_CFG_ADDR_SIZE__SHIFT 0x0
#define HDP_XDP_P2P_BAR_CFG__P2P_BAR_CFG_BAR_FROM__SHIFT 0x4
#define HDP_XDP_P2P_BAR_CFG__P2P_BAR_CFG_ADDR_SIZE_MASK 0x0000000FL
#define HDP_XDP_P2P_BAR_CFG__P2P_BAR_CFG_BAR_FROM_MASK 0x00000030L
//HDP_XDP_P2P_MBX_OFFSET
#define HDP_XDP_P2P_MBX_OFFSET__P2P_MBX_OFFSET__SHIFT 0x0
#define HDP_XDP_P2P_MBX_OFFSET__P2P_MBX_OFFSET_MASK 0x0001FFFFL
//HDP_XDP_P2P_MBX_ADDR0
#define HDP_XDP_P2P_MBX_ADDR0__VALID__SHIFT 0x0
#define HDP_XDP_P2P_MBX_ADDR0__ADDR_35_19__SHIFT 0x3
#define HDP_XDP_P2P_MBX_ADDR0__ADDR_39_36__SHIFT 0x14
#define HDP_XDP_P2P_MBX_ADDR0__ADDR_47_40__SHIFT 0x18
#define HDP_XDP_P2P_MBX_ADDR0__VALID_MASK 0x00000001L
#define HDP_XDP_P2P_MBX_ADDR0__ADDR_35_19_MASK 0x000FFFF8L
#define HDP_XDP_P2P_MBX_ADDR0__ADDR_39_36_MASK 0x00F00000L
#define HDP_XDP_P2P_MBX_ADDR0__ADDR_47_40_MASK 0xFF000000L
//HDP_XDP_P2P_MBX_ADDR1
#define HDP_XDP_P2P_MBX_ADDR1__VALID__SHIFT 0x0
#define HDP_XDP_P2P_MBX_ADDR1__ADDR_35_19__SHIFT 0x3
#define HDP_XDP_P2P_MBX_ADDR1__ADDR_39_36__SHIFT 0x14
#define HDP_XDP_P2P_MBX_ADDR1__ADDR_47_40__SHIFT 0x18
#define HDP_XDP_P2P_MBX_ADDR1__VALID_MASK 0x00000001L
#define HDP_XDP_P2P_MBX_ADDR1__ADDR_35_19_MASK 0x000FFFF8L
#define HDP_XDP_P2P_MBX_ADDR1__ADDR_39_36_MASK 0x00F00000L
#define HDP_XDP_P2P_MBX_ADDR1__ADDR_47_40_MASK 0xFF000000L
//HDP_XDP_P2P_MBX_ADDR2
#define HDP_XDP_P2P_MBX_ADDR2__VALID__SHIFT 0x0
#define HDP_XDP_P2P_MBX_ADDR2__ADDR_35_19__SHIFT 0x3
#define HDP_XDP_P2P_MBX_ADDR2__ADDR_39_36__SHIFT 0x14
#define HDP_XDP_P2P_MBX_ADDR2__ADDR_47_40__SHIFT 0x18
#define HDP_XDP_P2P_MBX_ADDR2__VALID_MASK 0x00000001L
#define HDP_XDP_P2P_MBX_ADDR2__ADDR_35_19_MASK 0x000FFFF8L
#define HDP_XDP_P2P_MBX_ADDR2__ADDR_39_36_MASK 0x00F00000L
#define HDP_XDP_P2P_MBX_ADDR2__ADDR_47_40_MASK 0xFF000000L
//HDP_XDP_P2P_MBX_ADDR3
#define HDP_XDP_P2P_MBX_ADDR3__VALID__SHIFT 0x0
#define HDP_XDP_P2P_MBX_ADDR3__ADDR_35_19__SHIFT 0x3
#define HDP_XDP_P2P_MBX_ADDR3__ADDR_39_36__SHIFT 0x14
#define HDP_XDP_P2P_MBX_ADDR3__ADDR_47_40__SHIFT 0x18
#define HDP_XDP_P2P_MBX_ADDR3__VALID_MASK 0x00000001L
#define HDP_XDP_P2P_MBX_ADDR3__ADDR_35_19_MASK 0x000FFFF8L
#define HDP_XDP_P2P_MBX_ADDR3__ADDR_39_36_MASK 0x00F00000L
#define HDP_XDP_P2P_MBX_ADDR3__ADDR_47_40_MASK 0xFF000000L
//HDP_XDP_P2P_MBX_ADDR4
#define HDP_XDP_P2P_MBX_ADDR4__VALID__SHIFT 0x0
#define HDP_XDP_P2P_MBX_ADDR4__ADDR_35_19__SHIFT 0x3
#define HDP_XDP_P2P_MBX_ADDR4__ADDR_39_36__SHIFT 0x14
#define HDP_XDP_P2P_MBX_ADDR4__ADDR_47_40__SHIFT 0x18
#define HDP_XDP_P2P_MBX_ADDR4__VALID_MASK 0x00000001L
#define HDP_XDP_P2P_MBX_ADDR4__ADDR_35_19_MASK 0x000FFFF8L
#define HDP_XDP_P2P_MBX_ADDR4__ADDR_39_36_MASK 0x00F00000L
#define HDP_XDP_P2P_MBX_ADDR4__ADDR_47_40_MASK 0xFF000000L
//HDP_XDP_P2P_MBX_ADDR5
#define HDP_XDP_P2P_MBX_ADDR5__VALID__SHIFT 0x0
#define HDP_XDP_P2P_MBX_ADDR5__ADDR_35_19__SHIFT 0x3
#define HDP_XDP_P2P_MBX_ADDR5__ADDR_39_36__SHIFT 0x14
#define HDP_XDP_P2P_MBX_ADDR5__ADDR_47_40__SHIFT 0x18
#define HDP_XDP_P2P_MBX_ADDR5__VALID_MASK 0x00000001L
#define HDP_XDP_P2P_MBX_ADDR5__ADDR_35_19_MASK 0x000FFFF8L
#define HDP_XDP_P2P_MBX_ADDR5__ADDR_39_36_MASK 0x00F00000L
#define HDP_XDP_P2P_MBX_ADDR5__ADDR_47_40_MASK 0xFF000000L
//HDP_XDP_P2P_MBX_ADDR6
#define HDP_XDP_P2P_MBX_ADDR6__VALID__SHIFT 0x0
#define HDP_XDP_P2P_MBX_ADDR6__ADDR_35_19__SHIFT 0x3
#define HDP_XDP_P2P_MBX_ADDR6__ADDR_39_36__SHIFT 0x14
#define HDP_XDP_P2P_MBX_ADDR6__ADDR_47_40__SHIFT 0x18
#define HDP_XDP_P2P_MBX_ADDR6__VALID_MASK 0x00000001L
#define HDP_XDP_P2P_MBX_ADDR6__ADDR_35_19_MASK 0x000FFFF8L
#define HDP_XDP_P2P_MBX_ADDR6__ADDR_39_36_MASK 0x00F00000L
#define HDP_XDP_P2P_MBX_ADDR6__ADDR_47_40_MASK 0xFF000000L
//HDP_XDP_HDP_MBX_MC_CFG
#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_QOS__SHIFT 0x0
#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_SWAP__SHIFT 0x4
#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_VMID__SHIFT 0x8
#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_RO__SHIFT 0xc
#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_GCC__SHIFT 0xd
#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_SNOOP__SHIFT 0xe
#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_QOS_MASK 0x0000000FL
#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_SWAP_MASK 0x00000030L
#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_VMID_MASK 0x00000F00L
#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_RO_MASK 0x00001000L
#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_GCC_MASK 0x00002000L
#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_SNOOP_MASK 0x00004000L
//HDP_XDP_HDP_MC_CFG
#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_SNOOP_OVERRIDE__SHIFT 0x0
#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_GCC_OVERRIDE__SHIFT 0x1
#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_RO_OVERRIDE__SHIFT 0x2
#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_SNOOP__SHIFT 0x3
#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_SWAP__SHIFT 0x4
#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_VMID__SHIFT 0x8
#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_RO__SHIFT 0xc
#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_GCC__SHIFT 0xd
#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_XDP_HIGHER_PRI_THRESH__SHIFT 0xe
#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_SNOOP_OVERRIDE_MASK 0x00000001L
#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_GCC_OVERRIDE_MASK 0x00000002L
#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_RO_OVERRIDE_MASK 0x00000004L
#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_SNOOP_MASK 0x00000008L
#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_SWAP_MASK 0x00000030L
#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_VMID_MASK 0x00000F00L
#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_RO_MASK 0x00001000L
#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_GCC_MASK 0x00002000L
#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_XDP_HIGHER_PRI_THRESH_MASK 0x000FC000L
//HDP_XDP_HST_CFG
#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_EN__SHIFT 0x0
#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_TIMER__SHIFT 0x1
#define HDP_XDP_HST_CFG__HST_CFG_WR_BURST_EN__SHIFT 0x3
#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_64B_EN__SHIFT 0x4
#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_TIMER_PRELOAD_CFG__SHIFT 0x5
#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_EN_MASK 0x00000001L
#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_TIMER_MASK 0x00000006L
#define HDP_XDP_HST_CFG__HST_CFG_WR_BURST_EN_MASK 0x00000008L
#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_64B_EN_MASK 0x00000010L
#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_TIMER_PRELOAD_CFG_MASK 0x00000020L
//HDP_XDP_HDP_IPH_CFG
#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_INVERSE_PEER_TAG_MATCHING__SHIFT 0xc
#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_P2P_RD_EN__SHIFT 0xd
#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_INVERSE_PEER_TAG_MATCHING_MASK 0x00001000L
#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_P2P_RD_EN_MASK 0x00002000L
//HDP_XDP_P2P_BAR0
#define HDP_XDP_P2P_BAR0__ADDR__SHIFT 0x0
#define HDP_XDP_P2P_BAR0__FLUSH__SHIFT 0x10
#define HDP_XDP_P2P_BAR0__VALID__SHIFT 0x14
#define HDP_XDP_P2P_BAR0__ADDR_MASK 0x0000FFFFL
#define HDP_XDP_P2P_BAR0__FLUSH_MASK 0x000F0000L
#define HDP_XDP_P2P_BAR0__VALID_MASK 0x00100000L
//HDP_XDP_P2P_BAR1
#define HDP_XDP_P2P_BAR1__ADDR__SHIFT 0x0
#define HDP_XDP_P2P_BAR1__FLUSH__SHIFT 0x10
#define HDP_XDP_P2P_BAR1__VALID__SHIFT 0x14
#define HDP_XDP_P2P_BAR1__ADDR_MASK 0x0000FFFFL
#define HDP_XDP_P2P_BAR1__FLUSH_MASK 0x000F0000L
#define HDP_XDP_P2P_BAR1__VALID_MASK 0x00100000L
//HDP_XDP_P2P_BAR2
#define HDP_XDP_P2P_BAR2__ADDR__SHIFT 0x0
#define HDP_XDP_P2P_BAR2__FLUSH__SHIFT 0x10
#define HDP_XDP_P2P_BAR2__VALID__SHIFT 0x14
#define HDP_XDP_P2P_BAR2__ADDR_MASK 0x0000FFFFL
#define HDP_XDP_P2P_BAR2__FLUSH_MASK 0x000F0000L
#define HDP_XDP_P2P_BAR2__VALID_MASK 0x00100000L
//HDP_XDP_P2P_BAR3
#define HDP_XDP_P2P_BAR3__ADDR__SHIFT 0x0
#define HDP_XDP_P2P_BAR3__FLUSH__SHIFT 0x10
#define HDP_XDP_P2P_BAR3__VALID__SHIFT 0x14
#define HDP_XDP_P2P_BAR3__ADDR_MASK 0x0000FFFFL
#define HDP_XDP_P2P_BAR3__FLUSH_MASK 0x000F0000L
#define HDP_XDP_P2P_BAR3__VALID_MASK 0x00100000L
//HDP_XDP_P2P_BAR4
#define HDP_XDP_P2P_BAR4__ADDR__SHIFT 0x0
#define HDP_XDP_P2P_BAR4__FLUSH__SHIFT 0x10
#define HDP_XDP_P2P_BAR4__VALID__SHIFT 0x14
#define HDP_XDP_P2P_BAR4__ADDR_MASK 0x0000FFFFL
#define HDP_XDP_P2P_BAR4__FLUSH_MASK 0x000F0000L
#define HDP_XDP_P2P_BAR4__VALID_MASK 0x00100000L
//HDP_XDP_P2P_BAR5
#define HDP_XDP_P2P_BAR5__ADDR__SHIFT 0x0
#define HDP_XDP_P2P_BAR5__FLUSH__SHIFT 0x10
#define HDP_XDP_P2P_BAR5__VALID__SHIFT 0x14
#define HDP_XDP_P2P_BAR5__ADDR_MASK 0x0000FFFFL
#define HDP_XDP_P2P_BAR5__FLUSH_MASK 0x000F0000L
#define HDP_XDP_P2P_BAR5__VALID_MASK 0x00100000L
//HDP_XDP_P2P_BAR6
#define HDP_XDP_P2P_BAR6__ADDR__SHIFT 0x0
#define HDP_XDP_P2P_BAR6__FLUSH__SHIFT 0x10
#define HDP_XDP_P2P_BAR6__VALID__SHIFT 0x14
#define HDP_XDP_P2P_BAR6__ADDR_MASK 0x0000FFFFL
#define HDP_XDP_P2P_BAR6__FLUSH_MASK 0x000F0000L
#define HDP_XDP_P2P_BAR6__VALID_MASK 0x00100000L
//HDP_XDP_P2P_BAR7
#define HDP_XDP_P2P_BAR7__ADDR__SHIFT 0x0
#define HDP_XDP_P2P_BAR7__FLUSH__SHIFT 0x10
#define HDP_XDP_P2P_BAR7__VALID__SHIFT 0x14
#define HDP_XDP_P2P_BAR7__ADDR_MASK 0x0000FFFFL
#define HDP_XDP_P2P_BAR7__FLUSH_MASK 0x000F0000L
#define HDP_XDP_P2P_BAR7__VALID_MASK 0x00100000L
//HDP_XDP_FLUSH_ARMED_STS
#define HDP_XDP_FLUSH_ARMED_STS__FLUSH_ARMED_STS__SHIFT 0x0
#define HDP_XDP_FLUSH_ARMED_STS__FLUSH_ARMED_STS_MASK 0xFFFFFFFFL
//HDP_XDP_FLUSH_CNTR0_STS
#define HDP_XDP_FLUSH_CNTR0_STS__FLUSH_CNTR0_STS__SHIFT 0x0
#define HDP_XDP_FLUSH_CNTR0_STS__FLUSH_CNTR0_STS_MASK 0x03FFFFFFL
//HDP_XDP_STICKY
#define HDP_XDP_STICKY__STICKY_STS__SHIFT 0x0
#define HDP_XDP_STICKY__STICKY_W1C__SHIFT 0x10
#define HDP_XDP_STICKY__STICKY_STS_MASK 0x0000FFFFL
#define HDP_XDP_STICKY__STICKY_W1C_MASK 0xFFFF0000L
//HDP_XDP_CHKN
#define HDP_XDP_CHKN__CHKN_0_RSVD__SHIFT 0x0
#define HDP_XDP_CHKN__CHKN_1_RSVD__SHIFT 0x8
#define HDP_XDP_CHKN__CHKN_2_RSVD__SHIFT 0x10
#define HDP_XDP_CHKN__CHKN_3_RSVD__SHIFT 0x18
#define HDP_XDP_CHKN__CHKN_0_RSVD_MASK 0x000000FFL
#define HDP_XDP_CHKN__CHKN_1_RSVD_MASK 0x0000FF00L
#define HDP_XDP_CHKN__CHKN_2_RSVD_MASK 0x00FF0000L
#define HDP_XDP_CHKN__CHKN_3_RSVD_MASK 0xFF000000L
//HDP_XDP_BARS_ADDR_39_36
#define HDP_XDP_BARS_ADDR_39_36__BAR0_ADDR_39_36__SHIFT 0x0
#define HDP_XDP_BARS_ADDR_39_36__BAR1_ADDR_39_36__SHIFT 0x4
#define HDP_XDP_BARS_ADDR_39_36__BAR2_ADDR_39_36__SHIFT 0x8
#define HDP_XDP_BARS_ADDR_39_36__BAR3_ADDR_39_36__SHIFT 0xc
#define HDP_XDP_BARS_ADDR_39_36__BAR4_ADDR_39_36__SHIFT 0x10
#define HDP_XDP_BARS_ADDR_39_36__BAR5_ADDR_39_36__SHIFT 0x14
#define HDP_XDP_BARS_ADDR_39_36__BAR6_ADDR_39_36__SHIFT 0x18
#define HDP_XDP_BARS_ADDR_39_36__BAR7_ADDR_39_36__SHIFT 0x1c
#define HDP_XDP_BARS_ADDR_39_36__BAR0_ADDR_39_36_MASK 0x0000000FL
#define HDP_XDP_BARS_ADDR_39_36__BAR1_ADDR_39_36_MASK 0x000000F0L
#define HDP_XDP_BARS_ADDR_39_36__BAR2_ADDR_39_36_MASK 0x00000F00L
#define HDP_XDP_BARS_ADDR_39_36__BAR3_ADDR_39_36_MASK 0x0000F000L
#define HDP_XDP_BARS_ADDR_39_36__BAR4_ADDR_39_36_MASK 0x000F0000L
#define HDP_XDP_BARS_ADDR_39_36__BAR5_ADDR_39_36_MASK 0x00F00000L
#define HDP_XDP_BARS_ADDR_39_36__BAR6_ADDR_39_36_MASK 0x0F000000L
#define HDP_XDP_BARS_ADDR_39_36__BAR7_ADDR_39_36_MASK 0xF0000000L
//HDP_XDP_MC_VM_FB_LOCATION_BASE
#define HDP_XDP_MC_VM_FB_LOCATION_BASE__FB_BASE__SHIFT 0x0
#define HDP_XDP_MC_VM_FB_LOCATION_BASE__FB_BASE_MASK 0x03FFFFFFL
//HDP_XDP_GPU_IOV_VIOLATION_LOG
#define HDP_XDP_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0
#define HDP_XDP_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1
#define HDP_XDP_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2
#define HDP_XDP_GPU_IOV_VIOLATION_LOG__OPCODE__SHIFT 0x12
#define HDP_XDP_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x13
#define HDP_XDP_GPU_IOV_VIOLATION_LOG__VFID__SHIFT 0x14
#define HDP_XDP_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L
#define HDP_XDP_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L
#define HDP_XDP_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x0003FFFCL
#define HDP_XDP_GPU_IOV_VIOLATION_LOG__OPCODE_MASK 0x00040000L
#define HDP_XDP_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00080000L
#define HDP_XDP_GPU_IOV_VIOLATION_LOG__VFID_MASK 0x01F00000L
//HDP_XDP_GPU_IOV_VIOLATION_LOG2
#define HDP_XDP_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID__SHIFT 0x0
#define HDP_XDP_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID_MASK 0x000003FFL
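/*
 * Illustrative sketch, not part of the generated header: fields are
 * programmed with a read-modify-write using the same MASK/SHIFT pairs.
 * The hypothetical helper below composes an HDP_XDP_P2P_BAR0 value from
 * a BAR address field and marks the entry valid; only the mask/shift
 * usage reflects the generated convention, the helper itself is an
 * assumption for illustration.
 */
static inline u32 hdp_xdp_p2p_bar0_set_addr(u32 reg, u32 addr)
{
	/* clear the old ADDR field (bits 15:0), then insert the new one */
	reg &= ~HDP_XDP_P2P_BAR0__ADDR_MASK;
	reg |= (addr << HDP_XDP_P2P_BAR0__ADDR__SHIFT) &
	       HDP_XDP_P2P_BAR0__ADDR_MASK;
	/* mark the BAR entry valid (bit 20) */
	reg |= HDP_XDP_P2P_BAR0__VALID_MASK;
	return reg;
}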
#endif
