drm/amdgpu/dce8: simplify hpd code

Use an address offset like other dce code.
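The DC_HPD pin register blocks sit at a fixed stride from the DC_HPD1
block (see the new HPDn_REGISTER_OFFSET defines below), so an offset
table indexed by hpd pin can replace the per-pin switch statements.
A minimal sketch of the pattern, lifted from the change itself rather
than a complete function:

	static const u32 hpd_offsets[] = {
		HPD0_REGISTER_OFFSET,
		HPD1_REGISTER_OFFSET,
		HPD2_REGISTER_OFFSET,
		HPD3_REGISTER_OFFSET,
		HPD4_REGISTER_OFFSET,
		HPD5_REGISTER_OFFSET
	};

	/* e.g. sensing a pin becomes a single indexed register read */
	if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) &
	    DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
		connected = true;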

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Alex Deucher 2016-09-28 12:59:11 -04:00
parent 0f66356d24
commit 2285b91cd2
2 changed files with 54 additions and 190 deletions

drivers/gpu/drm/amd/amdgpu/cikd.h

@@ -43,6 +43,14 @@
 #define CRTC4_REGISTER_OFFSET                 (0x477c - 0x1b7c)
 #define CRTC5_REGISTER_OFFSET                 (0x4a7c - 0x1b7c)
 
+/* hpd instance offsets */
+#define HPD0_REGISTER_OFFSET                 (0x1807 - 0x1807)
+#define HPD1_REGISTER_OFFSET                 (0x180a - 0x1807)
+#define HPD2_REGISTER_OFFSET                 (0x180d - 0x1807)
+#define HPD3_REGISTER_OFFSET                 (0x1810 - 0x1807)
+#define HPD4_REGISTER_OFFSET                 (0x1813 - 0x1807)
+#define HPD5_REGISTER_OFFSET                 (0x1816 - 0x1807)
+
 #define BONAIRE_GB_ADDR_CONFIG_GOLDEN        0x12010001
 #define HAWAII_GB_ADDR_CONFIG_GOLDEN         0x12011003

drivers/gpu/drm/amd/amdgpu/dce_v8_0.c

@@ -56,6 +56,16 @@ static const u32 crtc_offsets[6] =
 	CRTC5_REGISTER_OFFSET
 };
 
+static const u32 hpd_offsets[] =
+{
+	HPD0_REGISTER_OFFSET,
+	HPD1_REGISTER_OFFSET,
+	HPD2_REGISTER_OFFSET,
+	HPD3_REGISTER_OFFSET,
+	HPD4_REGISTER_OFFSET,
+	HPD5_REGISTER_OFFSET
+};
+
 static const uint32_t dig_offsets[] = {
 	CRTC0_REGISTER_OFFSET,
 	CRTC1_REGISTER_OFFSET,
@@ -104,15 +114,6 @@ static const struct {
 		.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
 	}
 };
 
-static const uint32_t hpd_int_control_offsets[6] = {
-	mmDC_HPD1_INT_CONTROL,
-	mmDC_HPD2_INT_CONTROL,
-	mmDC_HPD3_INT_CONTROL,
-	mmDC_HPD4_INT_CONTROL,
-	mmDC_HPD5_INT_CONTROL,
-	mmDC_HPD6_INT_CONTROL,
-};
-
 static u32 dce_v8_0_audio_endpt_rreg(struct amdgpu_device *adev,
 				     u32 block_offset, u32 reg)
 {
@@ -278,34 +279,12 @@ static bool dce_v8_0_hpd_sense(struct amdgpu_device *adev,
 {
 	bool connected = false;
 
-	switch (hpd) {
-	case AMDGPU_HPD_1:
-		if (RREG32(mmDC_HPD1_INT_STATUS) & DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
-			connected = true;
-		break;
-	case AMDGPU_HPD_2:
-		if (RREG32(mmDC_HPD2_INT_STATUS) & DC_HPD2_INT_STATUS__DC_HPD2_SENSE_MASK)
-			connected = true;
-		break;
-	case AMDGPU_HPD_3:
-		if (RREG32(mmDC_HPD3_INT_STATUS) & DC_HPD3_INT_STATUS__DC_HPD3_SENSE_MASK)
-			connected = true;
-		break;
-	case AMDGPU_HPD_4:
-		if (RREG32(mmDC_HPD4_INT_STATUS) & DC_HPD4_INT_STATUS__DC_HPD4_SENSE_MASK)
-			connected = true;
-		break;
-	case AMDGPU_HPD_5:
-		if (RREG32(mmDC_HPD5_INT_STATUS) & DC_HPD5_INT_STATUS__DC_HPD5_SENSE_MASK)
-			connected = true;
-		break;
-	case AMDGPU_HPD_6:
-		if (RREG32(mmDC_HPD6_INT_STATUS) & DC_HPD6_INT_STATUS__DC_HPD6_SENSE_MASK)
-			connected = true;
-		break;
-	default:
-		break;
-	}
+	if (hpd >= adev->mode_info.num_hpd)
+		return connected;
+
+	if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) &
+	    DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
+		connected = true;
 
 	return connected;
 }
@@ -324,58 +303,15 @@ static void dce_v8_0_hpd_set_polarity(struct amdgpu_device *adev,
 	u32 tmp;
 	bool connected = dce_v8_0_hpd_sense(adev, hpd);
 
-	switch (hpd) {
-	case AMDGPU_HPD_1:
-		tmp = RREG32(mmDC_HPD1_INT_CONTROL);
-		if (connected)
-			tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
-		else
-			tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
-		WREG32(mmDC_HPD1_INT_CONTROL, tmp);
-		break;
-	case AMDGPU_HPD_2:
-		tmp = RREG32(mmDC_HPD2_INT_CONTROL);
-		if (connected)
-			tmp &= ~DC_HPD2_INT_CONTROL__DC_HPD2_INT_POLARITY_MASK;
-		else
-			tmp |= DC_HPD2_INT_CONTROL__DC_HPD2_INT_POLARITY_MASK;
-		WREG32(mmDC_HPD2_INT_CONTROL, tmp);
-		break;
-	case AMDGPU_HPD_3:
-		tmp = RREG32(mmDC_HPD3_INT_CONTROL);
-		if (connected)
-			tmp &= ~DC_HPD3_INT_CONTROL__DC_HPD3_INT_POLARITY_MASK;
-		else
-			tmp |= DC_HPD3_INT_CONTROL__DC_HPD3_INT_POLARITY_MASK;
-		WREG32(mmDC_HPD3_INT_CONTROL, tmp);
-		break;
-	case AMDGPU_HPD_4:
-		tmp = RREG32(mmDC_HPD4_INT_CONTROL);
-		if (connected)
-			tmp &= ~DC_HPD4_INT_CONTROL__DC_HPD4_INT_POLARITY_MASK;
-		else
-			tmp |= DC_HPD4_INT_CONTROL__DC_HPD4_INT_POLARITY_MASK;
-		WREG32(mmDC_HPD4_INT_CONTROL, tmp);
-		break;
-	case AMDGPU_HPD_5:
-		tmp = RREG32(mmDC_HPD5_INT_CONTROL);
-		if (connected)
-			tmp &= ~DC_HPD5_INT_CONTROL__DC_HPD5_INT_POLARITY_MASK;
-		else
-			tmp |= DC_HPD5_INT_CONTROL__DC_HPD5_INT_POLARITY_MASK;
-		WREG32(mmDC_HPD5_INT_CONTROL, tmp);
-		break;
-	case AMDGPU_HPD_6:
-		tmp = RREG32(mmDC_HPD6_INT_CONTROL);
-		if (connected)
-			tmp &= ~DC_HPD6_INT_CONTROL__DC_HPD6_INT_POLARITY_MASK;
-		else
-			tmp |= DC_HPD6_INT_CONTROL__DC_HPD6_INT_POLARITY_MASK;
-		WREG32(mmDC_HPD6_INT_CONTROL, tmp);
-		break;
-	default:
-		break;
-	}
+	if (hpd >= adev->mode_info.num_hpd)
+		return;
+
+	tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
+	if (connected)
+		tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
+	else
+		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
+	WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
 }
 
 /**
@@ -397,28 +333,10 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 
-		switch (amdgpu_connector->hpd.hpd) {
-		case AMDGPU_HPD_1:
-			WREG32(mmDC_HPD1_CONTROL, tmp);
-			break;
-		case AMDGPU_HPD_2:
-			WREG32(mmDC_HPD2_CONTROL, tmp);
-			break;
-		case AMDGPU_HPD_3:
-			WREG32(mmDC_HPD3_CONTROL, tmp);
-			break;
-		case AMDGPU_HPD_4:
-			WREG32(mmDC_HPD4_CONTROL, tmp);
-			break;
-		case AMDGPU_HPD_5:
-			WREG32(mmDC_HPD5_CONTROL, tmp);
-			break;
-		case AMDGPU_HPD_6:
-			WREG32(mmDC_HPD6_CONTROL, tmp);
-			break;
-		default:
-			break;
-		}
+		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
+			continue;
+
+		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 
 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
 		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
@@ -427,34 +345,9 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
 			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
 			 * also avoid interrupt storms during dpms.
 			 */
-			u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
-
-			switch (amdgpu_connector->hpd.hpd) {
-			case AMDGPU_HPD_1:
-				dc_hpd_int_cntl_reg = mmDC_HPD1_INT_CONTROL;
-				break;
-			case AMDGPU_HPD_2:
-				dc_hpd_int_cntl_reg = mmDC_HPD2_INT_CONTROL;
-				break;
-			case AMDGPU_HPD_3:
-				dc_hpd_int_cntl_reg = mmDC_HPD3_INT_CONTROL;
-				break;
-			case AMDGPU_HPD_4:
-				dc_hpd_int_cntl_reg = mmDC_HPD4_INT_CONTROL;
-				break;
-			case AMDGPU_HPD_5:
-				dc_hpd_int_cntl_reg = mmDC_HPD5_INT_CONTROL;
-				break;
-			case AMDGPU_HPD_6:
-				dc_hpd_int_cntl_reg = mmDC_HPD6_INT_CONTROL;
-				break;
-			default:
-				continue;
-			}
-
-			dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
-			dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
-			WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+			tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
+			tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
+			WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 			continue;
 		}
 
@@ -479,28 +372,11 @@ static void dce_v8_0_hpd_fini(struct amdgpu_device *adev)
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 
-		switch (amdgpu_connector->hpd.hpd) {
-		case AMDGPU_HPD_1:
-			WREG32(mmDC_HPD1_CONTROL, 0);
-			break;
-		case AMDGPU_HPD_2:
-			WREG32(mmDC_HPD2_CONTROL, 0);
-			break;
-		case AMDGPU_HPD_3:
-			WREG32(mmDC_HPD3_CONTROL, 0);
-			break;
-		case AMDGPU_HPD_4:
-			WREG32(mmDC_HPD4_CONTROL, 0);
-			break;
-		case AMDGPU_HPD_5:
-			WREG32(mmDC_HPD5_CONTROL, 0);
-			break;
-		case AMDGPU_HPD_6:
-			WREG32(mmDC_HPD6_CONTROL, 0);
-			break;
-		default:
-			break;
-		}
+		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
+			continue;
+
+		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], 0);
 
 		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
 	}
 }
@@ -3204,42 +3080,23 @@ static int dce_v8_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
 					    unsigned type,
 					    enum amdgpu_interrupt_state state)
 {
-	u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
+	u32 dc_hpd_int_cntl;
 
-	switch (type) {
-	case AMDGPU_HPD_1:
-		dc_hpd_int_cntl_reg = mmDC_HPD1_INT_CONTROL;
-		break;
-	case AMDGPU_HPD_2:
-		dc_hpd_int_cntl_reg = mmDC_HPD2_INT_CONTROL;
-		break;
-	case AMDGPU_HPD_3:
-		dc_hpd_int_cntl_reg = mmDC_HPD3_INT_CONTROL;
-		break;
-	case AMDGPU_HPD_4:
-		dc_hpd_int_cntl_reg = mmDC_HPD4_INT_CONTROL;
-		break;
-	case AMDGPU_HPD_5:
-		dc_hpd_int_cntl_reg = mmDC_HPD5_INT_CONTROL;
-		break;
-	case AMDGPU_HPD_6:
-		dc_hpd_int_cntl_reg = mmDC_HPD6_INT_CONTROL;
-		break;
-	default:
+	if (type >= adev->mode_info.num_hpd) {
 		DRM_DEBUG("invalid hdp %d\n", type);
 		return 0;
 	}
 
 	switch (state) {
 	case AMDGPU_IRQ_STATE_DISABLE:
-		dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
+		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
 		dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
-		WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
 		break;
 	case AMDGPU_IRQ_STATE_ENABLE:
-		dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
+		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
 		dc_hpd_int_cntl |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
-		WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
 		break;
 	default:
 		break;
@@ -3412,7 +3269,7 @@ static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
 			    struct amdgpu_irq_src *source,
 			    struct amdgpu_iv_entry *entry)
 {
-	uint32_t disp_int, mask, int_control, tmp;
+	uint32_t disp_int, mask, tmp;
 	unsigned hpd;
 
 	if (entry->src_data >= adev->mode_info.num_hpd) {
@@ -3423,12 +3280,11 @@ static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
 	hpd = entry->src_data;
 	disp_int = RREG32(interrupt_status_offsets[hpd].reg);
 	mask = interrupt_status_offsets[hpd].hpd;
-	int_control = hpd_int_control_offsets[hpd];
 
 	if (disp_int & mask) {
-		tmp = RREG32(int_control);
+		tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
 		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
-		WREG32(int_control, tmp);
+		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
 		schedule_work(&adev->hotplug_work);
 		DRM_DEBUG("IH: HPD%d\n", hpd + 1);
 	}