drm/amdgpu: switch sdma buffer function tear down to a helper
Switch all of the SDMA implementations to use the helper to tear down
the ttm buffer manager.

Tested-by: Bokun Zhang <Bokun.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit 571c053658 (parent e5da651985)
10 changed files with 36 additions and 59 deletions
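The change follows the same shape in every IP version. As a schematic before/after (a sketch only, not compilable in isolation; `sdma_vN_gfx_stop` is a hypothetical stand-in for the per-version stop functions in the hunks below):

```c
/* Before: each SDMA version open-coded the check against its own
 * local ring pointers before disabling the engine. */
static void sdma_vN_gfx_stop(struct amdgpu_device *adev)
{
    struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
    struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;

    if ((adev->mman.buffer_funcs_ring == sdma0) ||
        (adev->mman.buffer_funcs_ring == sdma1))
        amdgpu_ttm_set_buffer_funcs_status(adev, false);

    /* ... per-instance register writes that stop the rings ... */
}

/* After: one shared helper covers every instance, including page
 * queues where the hardware has them. */
static void sdma_vN_gfx_stop(struct amdgpu_device *adev)
{
    amdgpu_sdma_unset_buffer_funcs_helper(adev);

    /* ... per-instance register writes that stop the rings ... */
}
```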
drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c

```diff
@@ -285,3 +285,24 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
     }
     return err;
 }
+
+void amdgpu_sdma_unset_buffer_funcs_helper(struct amdgpu_device *adev)
+{
+    struct amdgpu_ring *sdma;
+    int i;
+
+    for (i = 0; i < adev->sdma.num_instances; i++) {
+        if (adev->sdma.has_page_queue) {
+            sdma = &adev->sdma.instance[i].page;
+            if (adev->mman.buffer_funcs_ring == sdma) {
+                amdgpu_ttm_set_buffer_funcs_status(adev, false);
+                break;
+            }
+        }
+        sdma = &adev->sdma.instance[i].ring;
+        if (adev->mman.buffer_funcs_ring == sdma) {
+            amdgpu_ttm_set_buffer_funcs_status(adev, false);
+            break;
+        }
+    }
+}
```
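The helper's search order is worth spelling out: for each instance it prefers the page queue when the hardware has one, and it stops at the first ring that is currently registered for buffer moves. A minimal standalone mock of that logic (hypothetical types and names, not amdgpu code):

```c
#include <stdbool.h>
#include <stdio.h>

struct ring { int id; };
struct inst { struct ring ring, page; };

/* Mirrors the helper's loop: page queue first (if present), then the
 * gfx ring; unset once and stop, like the 'break' in the real code. */
static void unset_buffer_funcs(const struct ring *funcs_ring,
                               struct inst *inst, int n, bool has_page)
{
    for (int i = 0; i < n; i++) {
        if (has_page && funcs_ring == &inst[i].page) {
            printf("unset: page queue of instance %d\n", i);
            return;
        }
        if (funcs_ring == &inst[i].ring) {
            printf("unset: gfx ring of instance %d\n", i);
            return;
        }
    }
    /* Not registered: nothing to do, matching the helper. */
}

int main(void)
{
    static struct inst inst[2];

    /* The gfx ring of instance 1 is the registered buffer-funcs ring. */
    unset_buffer_funcs(&inst[1].ring, inst, 2, false);
    return 0;
}
```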
drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h

```diff
@@ -128,4 +128,6 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
                                char *fw_name, u32 instance, bool duplicate);
 void amdgpu_sdma_destroy_inst_ctx(struct amdgpu_device *adev,
         bool duplicate);
+void amdgpu_sdma_unset_buffer_funcs_helper(struct amdgpu_device *adev);
+
 #endif
```
drivers/gpu/drm/amd/amdgpu/cik_sdma.c

```diff
@@ -309,14 +309,10 @@ static void cik_sdma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
  */
 static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
 {
-    struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
-    struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
     u32 rb_cntl;
     int i;
 
-    if ((adev->mman.buffer_funcs_ring == sdma0) ||
-        (adev->mman.buffer_funcs_ring == sdma1))
-        amdgpu_ttm_set_buffer_funcs_status(adev, false);
+    amdgpu_sdma_unset_buffer_funcs_helper(adev);
 
     for (i = 0; i < adev->sdma.num_instances; i++) {
         rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
```
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c

```diff
@@ -342,14 +342,10 @@ static void sdma_v2_4_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
  */
 static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
 {
-    struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
-    struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
     u32 rb_cntl, ib_cntl;
     int i;
 
-    if ((adev->mman.buffer_funcs_ring == sdma0) ||
-        (adev->mman.buffer_funcs_ring == sdma1))
-        amdgpu_ttm_set_buffer_funcs_status(adev, false);
+    amdgpu_sdma_unset_buffer_funcs_helper(adev);
 
     for (i = 0; i < adev->sdma.num_instances; i++) {
         rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
```
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c

```diff
@@ -516,14 +516,10 @@ static void sdma_v3_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
  */
 static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
 {
-    struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
-    struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
     u32 rb_cntl, ib_cntl;
     int i;
 
-    if ((adev->mman.buffer_funcs_ring == sdma0) ||
-        (adev->mman.buffer_funcs_ring == sdma1))
-        amdgpu_ttm_set_buffer_funcs_status(adev, false);
+    amdgpu_sdma_unset_buffer_funcs_helper(adev);
 
     for (i = 0; i < adev->sdma.num_instances; i++) {
         rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
```
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c

```diff
@@ -915,18 +915,12 @@ static void sdma_v4_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
  */
 static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev)
 {
-    struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
     u32 rb_cntl, ib_cntl;
-    int i, unset = 0;
+    int i;
 
+    amdgpu_sdma_unset_buffer_funcs_helper(adev);
+
     for (i = 0; i < adev->sdma.num_instances; i++) {
-        sdma[i] = &adev->sdma.instance[i].ring;
-
-        if ((adev->mman.buffer_funcs_ring == sdma[i]) && unset != 1) {
-            amdgpu_ttm_set_buffer_funcs_status(adev, false);
-            unset = 1;
-        }
-
         rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL);
         rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
         WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);
@@ -957,20 +951,12 @@ static void sdma_v4_0_rlc_stop(struct amdgpu_device *adev)
  */
 static void sdma_v4_0_page_stop(struct amdgpu_device *adev)
 {
-    struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
     u32 rb_cntl, ib_cntl;
     int i;
-    bool unset = false;
+
+    amdgpu_sdma_unset_buffer_funcs_helper(adev);
 
     for (i = 0; i < adev->sdma.num_instances; i++) {
-        sdma[i] = &adev->sdma.instance[i].page;
-
-        if ((adev->mman.buffer_funcs_ring == sdma[i]) &&
-            (!unset)) {
-            amdgpu_ttm_set_buffer_funcs_status(adev, false);
-            unset = true;
-        }
-
         rb_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL);
         rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL,
                     RB_ENABLE, 0);
```
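Note what the two sdma_v4_0 hunks delete besides the comparison itself: the `unset` flag. The old code needed it because the check sat inside the register-write loop and `amdgpu_ttm_set_buffer_funcs_status(adev, false)` had to fire at most once; the helper's `break` gives the same guarantee. A standalone sketch of that equivalence (hypothetical mock, not amdgpu code):

```c
#include <stdbool.h>
#include <stdio.h>

static void set_status(bool enable) { printf("set_status(%d)\n", enable); }

int main(void)
{
    bool match[4] = { false, true, false, true };
    bool unset = false;

    /* Old shape: flag-guarded unset inside the loop that also does the
     * per-instance register writes, so the loop must keep running. */
    for (int i = 0; i < 4; i++) {
        if (match[i] && !unset) {
            set_status(false);
            unset = true;
        }
        /* ... register writes for instance i continued here ... */
    }

    /* New shape: the helper's dedicated loop just breaks after the
     * first match; the register-write loop runs separately. */
    for (int i = 0; i < 4; i++) {
        if (match[i]) {
            set_status(false);
            break;
        }
    }
    return 0;
}
```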
drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c

```diff
@@ -584,14 +584,10 @@ static void sdma_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
  */
 static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev)
 {
-    struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
-    struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
     u32 rb_cntl, ib_cntl;
     int i;
 
-    if ((adev->mman.buffer_funcs_ring == sdma0) ||
-        (adev->mman.buffer_funcs_ring == sdma1))
-        amdgpu_ttm_set_buffer_funcs_status(adev, false);
+    amdgpu_sdma_unset_buffer_funcs_helper(adev);
 
     for (i = 0; i < adev->sdma.num_instances; i++) {
         rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
```
drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c

```diff
@@ -414,18 +414,10 @@ static void sdma_v5_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
  */
 static void sdma_v5_2_gfx_stop(struct amdgpu_device *adev)
 {
-    struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
-    struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
-    struct amdgpu_ring *sdma2 = &adev->sdma.instance[2].ring;
-    struct amdgpu_ring *sdma3 = &adev->sdma.instance[3].ring;
     u32 rb_cntl, ib_cntl;
     int i;
 
-    if ((adev->mman.buffer_funcs_ring == sdma0) ||
-        (adev->mman.buffer_funcs_ring == sdma1) ||
-        (adev->mman.buffer_funcs_ring == sdma2) ||
-        (adev->mman.buffer_funcs_ring == sdma3))
-        amdgpu_ttm_set_buffer_funcs_status(adev, false);
+    amdgpu_sdma_unset_buffer_funcs_helper(adev);
 
     for (i = 0; i < adev->sdma.num_instances; i++) {
         rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
```
drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c

```diff
@@ -398,14 +398,10 @@ static void sdma_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
  */
 static void sdma_v6_0_gfx_stop(struct amdgpu_device *adev)
 {
-    struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
-    struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
     u32 rb_cntl, ib_cntl;
     int i;
 
-    if ((adev->mman.buffer_funcs_ring == sdma0) ||
-        (adev->mman.buffer_funcs_ring == sdma1))
-        amdgpu_ttm_set_buffer_funcs_status(adev, false);
+    amdgpu_sdma_unset_buffer_funcs_helper(adev);
 
     for (i = 0; i < adev->sdma.num_instances; i++) {
         rb_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL));
@@ -415,9 +411,6 @@ static void sdma_v6_0_gfx_stop(struct amdgpu_device *adev)
         ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_ENABLE, 0);
         WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL), ib_cntl);
     }
-
-    sdma0->sched.ready = false;
-    sdma1->sched.ready = false;
 }
 
 /**
```
drivers/gpu/drm/amd/amdgpu/si_dma.c

```diff
@@ -116,15 +116,14 @@ static void si_dma_stop(struct amdgpu_device *adev)
     u32 rb_cntl;
     unsigned i;
 
+    amdgpu_sdma_unset_buffer_funcs_helper(adev);
+
     for (i = 0; i < adev->sdma.num_instances; i++) {
         ring = &adev->sdma.instance[i].ring;
         /* dma0 */
         rb_cntl = RREG32(DMA_RB_CNTL + sdma_offsets[i]);
         rb_cntl &= ~DMA_RB_ENABLE;
         WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);
-
-        if (adev->mman.buffer_funcs_ring == ring)
-            amdgpu_ttm_set_buffer_funcs_status(adev, false);
     }
 }
 
```