drm/amdgpu: convert gfx.kiq to array type (v3)

v1: more kiq instances are available in SOC (Le)
v2: squash commits to avoid breaking the build (Le)
v3: make the conversion for gfx/mec v11_0 (Hawking)

Signed-off-by: Le Ma <le.ma@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Authored by Le Ma on 2022-05-24 10:51:43 +08:00; committed by Alex Deucher
parent 20c3dffdcc
commit 277bd3371f
16 changed files with 122 additions and 122 deletions
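
A quick orientation before the per-file hunks: the change is mechanical. struct amdgpu_gfx used to embed a single KIQ; it now carries an array of them, and every existing caller is switched to instance 0. A minimal sketch of the pattern (the helper below is illustrative only and is not part of the patch):

/* amdgpu_gfx.h: one KIQ becomes one KIQ per GC instance */
struct amdgpu_gfx {
	/* ... */
	struct amdgpu_kiq	kiq[AMDGPU_MAX_GC_INSTANCES];	/* was: struct amdgpu_kiq kiq; */
	/* ... */
};

/* Call sites change from adev->gfx.kiq.ring to adev->gfx.kiq[0].ring.
 * Illustrative helper, assuming only instance 0 is wired up for now.
 */
static inline struct amdgpu_ring *amdgpu_gfx_kiq0_ring(struct amdgpu_device *adev)
{
	return &adev->gfx.kiq[0].ring;
}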

drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c

@ -288,7 +288,7 @@ static int kgd_hiq_mqd_load(struct amdgpu_device *adev, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
uint32_t doorbell_off)
{
struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
struct v10_compute_mqd *m;
uint32_t mec, pipe;
int r;
@ -303,7 +303,7 @@ static int kgd_hiq_mqd_load(struct amdgpu_device *adev, void *mqd,
pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
mec, pipe, queue_id);
spin_lock(&adev->gfx.kiq.ring_lock);
spin_lock(&adev->gfx.kiq[0].ring_lock);
r = amdgpu_ring_alloc(kiq_ring, 7);
if (r) {
pr_err("Failed to alloc KIQ (%d).\n", r);
@ -330,7 +330,7 @@ static int kgd_hiq_mqd_load(struct amdgpu_device *adev, void *mqd,
amdgpu_ring_commit(kiq_ring);
out_unlock:
spin_unlock(&adev->gfx.kiq.ring_lock);
spin_unlock(&adev->gfx.kiq[0].ring_lock);
release_queue(adev);
return r;
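
The HIQ loaders in the four KFD back-ends (v10, v10.3, v11, v9) all follow this locked KIQ submission shape; only the kiq index changes. A condensed sketch of the post-conversion flow (hypothetical function name, packet emission elided):

/* Condensed sketch of the shared HIQ load flow after the conversion;
 * the point here is the kiq[0] ring_lock pattern, not the packet body.
 */
static int hiq_mqd_load_sketch(struct amdgpu_device *adev)
{
	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
	int r;

	spin_lock(&adev->gfx.kiq[0].ring_lock);
	r = amdgpu_ring_alloc(kiq_ring, 7);	/* room for one MAP_QUEUES packet */
	if (r) {
		pr_err("Failed to alloc KIQ (%d).\n", r);
		goto out_unlock;
	}
	/* ... emit the MAP_QUEUES packet describing the HIQ MQD ... */
	amdgpu_ring_commit(kiq_ring);

out_unlock:
	spin_unlock(&adev->gfx.kiq[0].ring_lock);
	return r;
}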

drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c

@ -275,7 +275,7 @@ static int hiq_mqd_load_v10_3(struct amdgpu_device *adev, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
uint32_t doorbell_off)
{
struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
struct v10_compute_mqd *m;
uint32_t mec, pipe;
int r;
@ -290,7 +290,7 @@ static int hiq_mqd_load_v10_3(struct amdgpu_device *adev, void *mqd,
pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
mec, pipe, queue_id);
spin_lock(&adev->gfx.kiq.ring_lock);
spin_lock(&adev->gfx.kiq[0].ring_lock);
r = amdgpu_ring_alloc(kiq_ring, 7);
if (r) {
pr_err("Failed to alloc KIQ (%d).\n", r);
@ -317,7 +317,7 @@ static int hiq_mqd_load_v10_3(struct amdgpu_device *adev, void *mqd,
amdgpu_ring_commit(kiq_ring);
out_unlock:
spin_unlock(&adev->gfx.kiq.ring_lock);
spin_unlock(&adev->gfx.kiq[0].ring_lock);
release_queue(adev);
return r;

drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c

@ -260,7 +260,7 @@ static int hiq_mqd_load_v11(struct amdgpu_device *adev, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
uint32_t doorbell_off)
{
struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
struct v11_compute_mqd *m;
uint32_t mec, pipe;
int r;
@ -275,7 +275,7 @@ static int hiq_mqd_load_v11(struct amdgpu_device *adev, void *mqd,
pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
mec, pipe, queue_id);
spin_lock(&adev->gfx.kiq.ring_lock);
spin_lock(&adev->gfx.kiq[0].ring_lock);
r = amdgpu_ring_alloc(kiq_ring, 7);
if (r) {
pr_err("Failed to alloc KIQ (%d).\n", r);
@ -302,7 +302,7 @@ static int hiq_mqd_load_v11(struct amdgpu_device *adev, void *mqd,
amdgpu_ring_commit(kiq_ring);
out_unlock:
spin_unlock(&adev->gfx.kiq.ring_lock);
spin_unlock(&adev->gfx.kiq[0].ring_lock);
release_queue(adev);
return r;

drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c

@ -300,7 +300,7 @@ int kgd_gfx_v9_hiq_mqd_load(struct amdgpu_device *adev, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
uint32_t doorbell_off)
{
struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
struct v9_mqd *m;
uint32_t mec, pipe;
int r;
@ -315,7 +315,7 @@ int kgd_gfx_v9_hiq_mqd_load(struct amdgpu_device *adev, void *mqd,
pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
mec, pipe, queue_id);
spin_lock(&adev->gfx.kiq.ring_lock);
spin_lock(&adev->gfx.kiq[0].ring_lock);
r = amdgpu_ring_alloc(kiq_ring, 7);
if (r) {
pr_err("Failed to alloc KIQ (%d).\n", r);
@ -342,7 +342,7 @@ int kgd_gfx_v9_hiq_mqd_load(struct amdgpu_device *adev, void *mqd,
amdgpu_ring_commit(kiq_ring);
out_unlock:
spin_unlock(&adev->gfx.kiq.ring_lock);
spin_unlock(&adev->gfx.kiq[0].ring_lock);
release_queue(adev);
return r;

drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c

@ -296,7 +296,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
struct amdgpu_irq_src *irq)
{
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
int r = 0;
spin_lock_init(&kiq->ring_lock);
@ -329,7 +329,7 @@ void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring)
void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev)
{
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
amdgpu_bo_free_kernel(&kiq->eop_obj, &kiq->eop_gpu_addr, NULL);
}
@ -339,7 +339,7 @@ int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
{
int r;
u32 *hpd;
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
r = amdgpu_bo_create_kernel(adev, hpd_size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_GTT, &kiq->eop_obj,
@ -368,7 +368,7 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
int r, i;
/* create MQD for KIQ */
ring = &adev->gfx.kiq.ring;
ring = &adev->gfx.kiq[0].ring;
if (!adev->enable_mes_kiq && !ring->mqd_obj) {
/* originaly the KIQ MQD is put in GTT domain, but for SRIOV VRAM domain is a must
* otherwise hypervisor trigger SAVE_VF fail after driver unloaded which mean MQD
@ -458,7 +458,7 @@ void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev)
&ring->mqd_ptr);
}
ring = &adev->gfx.kiq.ring;
ring = &adev->gfx.kiq[0].ring;
kfree(adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]);
amdgpu_bo_free_kernel(&ring->mqd_obj,
&ring->mqd_gpu_addr,
@ -467,17 +467,17 @@ void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev)
int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev)
{
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
struct amdgpu_ring *kiq_ring = &kiq->ring;
int i, r = 0;
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
spin_lock(&adev->gfx.kiq.ring_lock);
spin_lock(&adev->gfx.kiq[0].ring_lock);
if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
adev->gfx.num_compute_rings)) {
spin_unlock(&adev->gfx.kiq.ring_lock);
spin_unlock(&adev->gfx.kiq[0].ring_lock);
return -ENOMEM;
}
@ -485,9 +485,9 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev)
kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.compute_ring[i],
RESET_QUEUES, 0, 0);
if (adev->gfx.kiq.ring.sched.ready && !adev->job_hang)
if (adev->gfx.kiq[0].ring.sched.ready && !adev->job_hang)
r = amdgpu_ring_test_helper(kiq_ring);
spin_unlock(&adev->gfx.kiq.ring_lock);
spin_unlock(&adev->gfx.kiq[0].ring_lock);
return r;
}
@ -507,8 +507,8 @@ int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev)
{
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
uint64_t queue_mask = 0;
int r, i;
@ -532,13 +532,13 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev)
DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
kiq_ring->queue);
spin_lock(&adev->gfx.kiq.ring_lock);
spin_lock(&adev->gfx.kiq[0].ring_lock);
r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
adev->gfx.num_compute_rings +
kiq->pmf->set_resources_size);
if (r) {
DRM_ERROR("Failed to lock KIQ (%d).\n", r);
spin_unlock(&adev->gfx.kiq.ring_lock);
spin_unlock(&adev->gfx.kiq[0].ring_lock);
return r;
}
@ -550,7 +550,7 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev)
kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.compute_ring[i]);
r = amdgpu_ring_test_helper(kiq_ring);
spin_unlock(&adev->gfx.kiq.ring_lock);
spin_unlock(&adev->gfx.kiq[0].ring_lock);
if (r)
DRM_ERROR("KCQ enable failed\n");
@ -788,7 +788,7 @@ uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
signed long r, cnt = 0;
unsigned long flags;
uint32_t seq, reg_val_offs = 0, value = 0;
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
struct amdgpu_ring *ring = &kiq->ring;
if (amdgpu_device_skip_hw_access(adev))
@ -856,7 +856,7 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
signed long r, cnt = 0;
unsigned long flags;
uint32_t seq;
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
struct amdgpu_ring *ring = &kiq->ring;
BUG_ON(!ring->funcs->emit_wreg);

drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h

@ -296,7 +296,7 @@ struct amdgpu_gfx {
struct amdgpu_ce ce;
struct amdgpu_me me;
struct amdgpu_mec mec;
struct amdgpu_kiq kiq;
struct amdgpu_kiq kiq[AMDGPU_MAX_GC_INSTANCES];
struct amdgpu_imu imu;
bool rs64_enable; /* firmware format */
const struct firmware *me_fw; /* ME firmware */

drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c

@ -74,7 +74,7 @@ void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
uint32_t reg0, uint32_t reg1,
uint32_t ref, uint32_t mask)
{
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
struct amdgpu_ring *ring = &kiq->ring;
signed long r, cnt = 0;
unsigned long flags;

drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c

@ -3568,7 +3568,7 @@ static void gfx10_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
struct amdgpu_device *adev = kiq_ring->adev;
uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
if (adev->enable_mes && !adev->gfx.kiq.ring.sched.ready) {
if (adev->enable_mes && !adev->gfx.kiq[0].ring.sched.ready) {
amdgpu_mes_unmap_legacy_queue(adev, ring, action, gpu_addr, seq);
return;
}
@ -3636,7 +3636,7 @@ static const struct kiq_pm4_funcs gfx_v10_0_kiq_pm4_funcs = {
static void gfx_v10_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
adev->gfx.kiq.pmf = &gfx_v10_0_kiq_pm4_funcs;
adev->gfx.kiq[0].pmf = &gfx_v10_0_kiq_pm4_funcs;
}
static void gfx_v10_0_init_spm_golden_registers(struct amdgpu_device *adev)
@ -4550,7 +4550,7 @@ static int gfx_v10_0_sw_init(void *handle)
/* KIQ event */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP,
GFX_10_1__SRCID__CP_IB2_INTERRUPT_PKT,
&adev->gfx.kiq.irq);
&adev->gfx.kiq[0].irq);
if (r)
return r;
@ -4635,7 +4635,7 @@ static int gfx_v10_0_sw_init(void *handle)
return r;
}
kiq = &adev->gfx.kiq;
kiq = &adev->gfx.kiq[0];
r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
if (r)
return r;
@ -4693,7 +4693,7 @@ static int gfx_v10_0_sw_fini(void *handle)
amdgpu_gfx_mqd_sw_fini(adev);
if (!adev->enable_mes_kiq) {
amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring);
amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring);
amdgpu_gfx_kiq_fini(adev);
}
@ -6214,7 +6214,7 @@ static void gfx_v10_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
CP_MEC_CNTL__MEC_ME2_HALT_MASK));
break;
}
adev->gfx.kiq.ring.sched.ready = false;
adev->gfx.kiq[0].ring.sched.ready = false;
}
udelay(50);
}
@ -6524,8 +6524,8 @@ static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring)
#ifndef BRING_UP_DEBUG
static int gfx_v10_0_kiq_enable_kgq(struct amdgpu_device *adev)
{
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
int r, i;
if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
@ -6885,7 +6885,7 @@ static int gfx_v10_0_kiq_resume(struct amdgpu_device *adev)
struct amdgpu_ring *ring;
int r;
ring = &adev->gfx.kiq.ring;
ring = &adev->gfx.kiq[0].ring;
r = amdgpu_bo_reserve(ring->mqd_obj, false);
if (unlikely(r != 0))
@ -7243,7 +7243,7 @@ static int gfx_v10_0_hw_init(void *handle)
#ifndef BRING_UP_DEBUG
static int gfx_v10_0_kiq_disable_kgq(struct amdgpu_device *adev)
{
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
struct amdgpu_ring *kiq_ring = &kiq->ring;
int i;
@ -8640,7 +8640,7 @@ static int gfx_v10_0_ring_preempt_ib(struct amdgpu_ring *ring)
{
int i, r = 0;
struct amdgpu_device *adev = ring->adev;
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
struct amdgpu_ring *kiq_ring = &kiq->ring;
unsigned long flags;
@ -9148,7 +9148,7 @@ static int gfx_v10_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
enum amdgpu_interrupt_state state)
{
uint32_t tmp, target;
struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
struct amdgpu_ring *ring = &(adev->gfx.kiq[0].ring);
if (ring->me == 1)
target = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
@ -9192,7 +9192,7 @@ static int gfx_v10_0_kiq_irq(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
u8 me_id, pipe_id, queue_id;
struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
struct amdgpu_ring *ring = &(adev->gfx.kiq[0].ring);
me_id = (entry->ring_id & 0x0c) >> 2;
pipe_id = (entry->ring_id & 0x03) >> 0;
@ -9369,7 +9369,7 @@ static void gfx_v10_0_set_ring_funcs(struct amdgpu_device *adev)
{
int i;
adev->gfx.kiq.ring.funcs = &gfx_v10_0_ring_funcs_kiq;
adev->gfx.kiq[0].ring.funcs = &gfx_v10_0_ring_funcs_kiq;
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
adev->gfx.gfx_ring[i].funcs = &gfx_v10_0_ring_funcs_gfx;
@ -9403,8 +9403,8 @@ static void gfx_v10_0_set_irq_funcs(struct amdgpu_device *adev)
adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
adev->gfx.eop_irq.funcs = &gfx_v10_0_eop_irq_funcs;
adev->gfx.kiq.irq.num_types = AMDGPU_CP_KIQ_IRQ_LAST;
adev->gfx.kiq.irq.funcs = &gfx_v10_0_kiq_irq_funcs;
adev->gfx.kiq[0].irq.num_types = AMDGPU_CP_KIQ_IRQ_LAST;
adev->gfx.kiq[0].irq.funcs = &gfx_v10_0_kiq_irq_funcs;
adev->gfx.priv_reg_irq.num_types = 1;
adev->gfx.priv_reg_irq.funcs = &gfx_v10_0_priv_reg_irq_funcs;

drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c

@ -192,7 +192,7 @@ static void gfx11_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
struct amdgpu_device *adev = kiq_ring->adev;
uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
if (adev->enable_mes && !adev->gfx.kiq.ring.sched.ready) {
if (adev->enable_mes && !adev->gfx.kiq[0].ring.sched.ready) {
amdgpu_mes_unmap_legacy_queue(adev, ring, action, gpu_addr, seq);
return;
}
@ -260,7 +260,7 @@ static const struct kiq_pm4_funcs gfx_v11_0_kiq_pm4_funcs = {
static void gfx_v11_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
adev->gfx.kiq.pmf = &gfx_v11_0_kiq_pm4_funcs;
adev->gfx.kiq[0].pmf = &gfx_v11_0_kiq_pm4_funcs;
}
static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev)
@ -1395,7 +1395,7 @@ static int gfx_v11_0_sw_init(void *handle)
return r;
}
kiq = &adev->gfx.kiq;
kiq = &adev->gfx.kiq[0];
r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
if (r)
return r;
@ -1466,7 +1466,7 @@ static int gfx_v11_0_sw_fini(void *handle)
amdgpu_gfx_mqd_sw_fini(adev);
if (!adev->enable_mes_kiq) {
amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring);
amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring);
amdgpu_gfx_kiq_fini(adev);
}
@ -3337,7 +3337,7 @@ static void gfx_v11_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
WREG32_SOC15(GC, 0, regCP_MEC_CNTL, data);
}
adev->gfx.kiq.ring.sched.ready = enable;
adev->gfx.kiq[0].ring.sched.ready = enable;
udelay(50);
}
@ -3732,8 +3732,8 @@ static int gfx_v11_0_gfx_init_queue(struct amdgpu_ring *ring)
#ifndef BRING_UP_DEBUG
static int gfx_v11_0_kiq_enable_kgq(struct amdgpu_device *adev)
{
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
int r, i;
if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
@ -4108,7 +4108,7 @@ static int gfx_v11_0_kiq_resume(struct amdgpu_device *adev)
struct amdgpu_ring *ring;
int r;
ring = &adev->gfx.kiq.ring;
ring = &adev->gfx.kiq[0].ring;
r = amdgpu_bo_reserve(ring->mqd_obj, false);
if (unlikely(r != 0))
@ -4417,7 +4417,7 @@ static int gfx_v11_0_hw_init(void *handle)
#ifndef BRING_UP_DEBUG
static int gfx_v11_0_kiq_disable_kgq(struct amdgpu_device *adev)
{
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
struct amdgpu_ring *kiq_ring = &kiq->ring;
int i, r = 0;
@ -4432,7 +4432,7 @@ static int gfx_v11_0_kiq_disable_kgq(struct amdgpu_device *adev)
kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.gfx_ring[i],
PREEMPT_QUEUES, 0, 0);
if (adev->gfx.kiq.ring.sched.ready)
if (adev->gfx.kiq[0].ring.sched.ready)
r = amdgpu_ring_test_helper(kiq_ring);
return r;
@ -5622,7 +5622,7 @@ static int gfx_v11_0_ring_preempt_ib(struct amdgpu_ring *ring)
{
int i, r = 0;
struct amdgpu_device *adev = ring->adev;
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
struct amdgpu_ring *kiq_ring = &kiq->ring;
unsigned long flags;
@ -6120,7 +6120,7 @@ static int gfx_v11_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
enum amdgpu_interrupt_state state)
{
uint32_t tmp, target;
struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
struct amdgpu_ring *ring = &(adev->gfx.kiq[0].ring);
target = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL);
target += ring->pipe;
@ -6317,7 +6317,7 @@ static void gfx_v11_0_set_ring_funcs(struct amdgpu_device *adev)
{
int i;
adev->gfx.kiq.ring.funcs = &gfx_v11_0_ring_funcs_kiq;
adev->gfx.kiq[0].ring.funcs = &gfx_v11_0_ring_funcs_kiq;
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
adev->gfx.gfx_ring[i].funcs = &gfx_v11_0_ring_funcs_gfx;

drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c

@ -2021,7 +2021,7 @@ static int gfx_v8_0_sw_init(void *handle)
return r;
}
kiq = &adev->gfx.kiq;
kiq = &adev->gfx.kiq[0];
r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
if (r)
return r;
@ -2051,7 +2051,7 @@ static int gfx_v8_0_sw_fini(void *handle)
amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
amdgpu_gfx_mqd_sw_fini(adev);
amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring);
amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring);
amdgpu_gfx_kiq_fini(adev);
gfx_v8_0_mec_fini(adev);
@ -4292,7 +4292,7 @@ static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
WREG32(mmCP_MEC_CNTL, 0);
} else {
WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
adev->gfx.kiq.ring.sched.ready = false;
adev->gfx.kiq[0].ring.sched.ready = false;
}
udelay(50);
}
@ -4314,7 +4314,7 @@ static void gfx_v8_0_kiq_setting(struct amdgpu_ring *ring)
static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev)
{
struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
uint64_t queue_mask = 0;
int r, i;
@ -4678,7 +4678,7 @@ static int gfx_v8_0_kiq_resume(struct amdgpu_device *adev)
struct amdgpu_ring *ring;
int r;
ring = &adev->gfx.kiq.ring;
ring = &adev->gfx.kiq[0].ring;
r = amdgpu_bo_reserve(ring->mqd_obj, false);
if (unlikely(r != 0))
@ -4741,7 +4741,7 @@ static int gfx_v8_0_cp_test_all_rings(struct amdgpu_device *adev)
if (r)
return r;
ring = &adev->gfx.kiq.ring;
ring = &adev->gfx.kiq[0].ring;
r = amdgpu_ring_test_helper(ring);
if (r)
return r;
@ -4808,7 +4808,7 @@ static int gfx_v8_0_hw_init(void *handle)
static int gfx_v8_0_kcq_disable(struct amdgpu_device *adev)
{
int r, i;
struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
r = amdgpu_ring_alloc(kiq_ring, 6 * adev->gfx.num_compute_rings);
if (r)
@ -7001,7 +7001,7 @@ static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev)
{
int i;
adev->gfx.kiq.ring.funcs = &gfx_v8_0_ring_funcs_kiq;
adev->gfx.kiq[0].ring.funcs = &gfx_v8_0_ring_funcs_kiq;
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
adev->gfx.gfx_ring[i].funcs = &gfx_v8_0_ring_funcs_gfx;

drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c

@ -898,7 +898,7 @@ static const struct kiq_pm4_funcs gfx_v9_0_kiq_pm4_funcs = {
static void gfx_v9_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
adev->gfx.kiq.pmf = &gfx_v9_0_kiq_pm4_funcs;
adev->gfx.kiq[0].pmf = &gfx_v9_0_kiq_pm4_funcs;
}
static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
@ -2174,7 +2174,7 @@ static int gfx_v9_0_sw_init(void *handle)
return r;
}
kiq = &adev->gfx.kiq;
kiq = &adev->gfx.kiq[0];
r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
if (r)
return r;
@ -2216,7 +2216,7 @@ static int gfx_v9_0_sw_fini(void *handle)
amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
amdgpu_gfx_mqd_sw_fini(adev);
amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring);
amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring);
amdgpu_gfx_kiq_fini(adev);
gfx_v9_0_mec_fini(adev);
@ -3155,7 +3155,7 @@ static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
} else {
WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL,
(CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
adev->gfx.kiq.ring.sched.ready = false;
adev->gfx.kiq[0].ring.sched.ready = false;
}
udelay(50);
}
@ -3610,7 +3610,7 @@ static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
struct amdgpu_ring *ring;
int r;
ring = &adev->gfx.kiq.ring;
ring = &adev->gfx.kiq[0].ring;
r = amdgpu_bo_reserve(ring->mqd_obj, false);
if (unlikely(r != 0))
@ -3789,10 +3789,10 @@ static int gfx_v9_0_hw_fini(void *handle)
*/
if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
mutex_lock(&adev->srbm_mutex);
soc15_grbm_select(adev, adev->gfx.kiq.ring.me,
adev->gfx.kiq.ring.pipe,
adev->gfx.kiq.ring.queue, 0);
gfx_v9_0_kiq_fini_register(&adev->gfx.kiq.ring);
soc15_grbm_select(adev, adev->gfx.kiq[0].ring.me,
adev->gfx.kiq[0].ring.pipe,
adev->gfx.kiq[0].ring.queue, 0);
gfx_v9_0_kiq_fini_register(&adev->gfx.kiq[0].ring);
soc15_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
}
@ -3913,7 +3913,7 @@ static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev)
unsigned long flags;
uint32_t seq, reg_val_offs = 0;
uint64_t value = 0;
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
struct amdgpu_ring *ring = &kiq->ring;
BUG_ON(!ring->funcs->emit_rreg);
@ -5385,7 +5385,7 @@ static int gfx_v9_0_ring_preempt_ib(struct amdgpu_ring *ring)
{
int i, r = 0;
struct amdgpu_device *adev = ring->adev;
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
struct amdgpu_ring *kiq_ring = &kiq->ring;
unsigned long flags;
@ -6964,7 +6964,7 @@ static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
{
int i;
adev->gfx.kiq.ring.funcs = &gfx_v9_0_ring_funcs_kiq;
adev->gfx.kiq[0].ring.funcs = &gfx_v9_0_ring_funcs_kiq;
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
adev->gfx.gfx_ring[i].funcs = &gfx_v9_0_ring_funcs_gfx;

drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c

@ -343,7 +343,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
/* For SRIOV run time, driver shouldn't access the register through MMIO
* Directly use kiq to do the vm invalidation instead
*/
if (adev->gfx.kiq.ring.sched.ready && !adev->enable_mes &&
if (adev->gfx.kiq[0].ring.sched.ready && !adev->enable_mes &&
(amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
down_read_trylock(&adev->reset_domain->sem)) {
struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
@ -428,11 +428,11 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
uint16_t queried_pasid;
bool ret;
u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout;
struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
struct amdgpu_ring *ring = &adev->gfx.kiq[0].ring;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
if (amdgpu_emu_mode == 0 && ring->sched.ready) {
spin_lock(&adev->gfx.kiq.ring_lock);
spin_lock(&adev->gfx.kiq[0].ring_lock);
/* 2 dwords flush + 8 dwords fence */
amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
kiq->pmf->kiq_invalidate_tlbs(ring,
@ -440,12 +440,12 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
if (r) {
amdgpu_ring_undo(ring);
spin_unlock(&adev->gfx.kiq.ring_lock);
spin_unlock(&adev->gfx.kiq[0].ring_lock);
return -ETIME;
}
amdgpu_ring_commit(ring);
spin_unlock(&adev->gfx.kiq.ring_lock);
spin_unlock(&adev->gfx.kiq[0].ring_lock);
r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
if (r < 1) {
dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
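
The GMC flush paths (here and in the gmc_v11_0 and gmc_v9_0 hunks below) drive TLB invalidation through the same single KIQ instance. A condensed sketch of the fenced submission after the conversion (illustrative function name, error paths trimmed, argument list as in the surrounding code):

/* Condensed sketch of the KIQ-based TLB flush after the conversion;
 * everything is still submitted on kiq[0] under its ring_lock.
 */
static int kiq_flush_tlb_sketch(struct amdgpu_device *adev,
				uint16_t pasid, uint32_t flush_type, bool all_hub)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
	struct amdgpu_ring *ring = &kiq->ring;
	uint32_t seq;
	long r;

	spin_lock(&adev->gfx.kiq[0].ring_lock);
	/* 2 dwords flush + 8 dwords fence */
	amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
	kiq->pmf->kiq_invalidate_tlbs(ring, pasid, flush_type, all_hub);
	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
	if (r) {
		amdgpu_ring_undo(ring);
		spin_unlock(&adev->gfx.kiq[0].ring_lock);
		return -ETIME;
	}
	amdgpu_ring_commit(ring);
	spin_unlock(&adev->gfx.kiq[0].ring_lock);

	r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
	return r < 1 ? -ETIME : 0;
}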

drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c

@ -291,7 +291,7 @@ static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
/* For SRIOV run time, driver shouldn't access the register through MMIO
* Directly use kiq to do the vm invalidation instead
*/
if ((adev->gfx.kiq.ring.sched.ready || adev->mes.ring.sched.ready) &&
if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring.sched.ready) &&
(amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
const unsigned eng = 17;
@ -329,11 +329,11 @@ static int gmc_v11_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
uint32_t seq;
uint16_t queried_pasid;
bool ret;
struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
struct amdgpu_ring *ring = &adev->gfx.kiq[0].ring;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
if (amdgpu_emu_mode == 0 && ring->sched.ready) {
spin_lock(&adev->gfx.kiq.ring_lock);
spin_lock(&adev->gfx.kiq[0].ring_lock);
/* 2 dwords flush + 8 dwords fence */
amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
kiq->pmf->kiq_invalidate_tlbs(ring,
@ -341,12 +341,12 @@ static int gmc_v11_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
if (r) {
amdgpu_ring_undo(ring);
spin_unlock(&adev->gfx.kiq.ring_lock);
spin_unlock(&adev->gfx.kiq[0].ring_lock);
return -ETIME;
}
amdgpu_ring_commit(ring);
spin_unlock(&adev->gfx.kiq.ring_lock);
spin_unlock(&adev->gfx.kiq[0].ring_lock);
r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
if (r < 1) {
dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);

drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c

@ -824,7 +824,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
/* This is necessary for a HW workaround under SRIOV as well
* as GFXOFF under bare metal
*/
if (adev->gfx.kiq.ring.sched.ready &&
if (adev->gfx.kiq[0].ring.sched.ready &&
(amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
down_read_trylock(&adev->reset_domain->sem)) {
uint32_t req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
@ -934,8 +934,8 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
uint16_t queried_pasid;
bool ret;
u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout;
struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
struct amdgpu_ring *ring = &adev->gfx.kiq[0].ring;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
if (amdgpu_in_reset(adev))
return -EIO;
@ -955,7 +955,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
if (vega20_xgmi_wa)
ndw += kiq->pmf->invalidate_tlbs_size;
spin_lock(&adev->gfx.kiq.ring_lock);
spin_lock(&adev->gfx.kiq[0].ring_lock);
/* 2 dwords flush + 8 dwords fence */
amdgpu_ring_alloc(ring, ndw);
if (vega20_xgmi_wa)
@ -966,13 +966,13 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
if (r) {
amdgpu_ring_undo(ring);
spin_unlock(&adev->gfx.kiq.ring_lock);
spin_unlock(&adev->gfx.kiq[0].ring_lock);
up_read(&adev->reset_domain->sem);
return -ETIME;
}
amdgpu_ring_commit(ring);
spin_unlock(&adev->gfx.kiq.ring_lock);
spin_unlock(&adev->gfx.kiq[0].ring_lock);
r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
if (r < 1) {
dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);

drivers/gpu/drm/amd/amdgpu/mes_v10_1.c

@ -797,8 +797,8 @@ static void mes_v10_1_queue_init_register(struct amdgpu_ring *ring)
static int mes_v10_1_kiq_enable_queue(struct amdgpu_device *adev)
{
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
int r;
if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
@ -863,9 +863,9 @@ static int mes_v10_1_kiq_ring_init(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
spin_lock_init(&adev->gfx.kiq.ring_lock);
spin_lock_init(&adev->gfx.kiq[0].ring_lock);
ring = &adev->gfx.kiq.ring;
ring = &adev->gfx.kiq[0].ring;
ring->me = 3;
ring->pipe = 1;
@ -891,7 +891,7 @@ static int mes_v10_1_mqd_sw_init(struct amdgpu_device *adev,
struct amdgpu_ring *ring;
if (pipe == AMDGPU_MES_KIQ_PIPE)
ring = &adev->gfx.kiq.ring;
ring = &adev->gfx.kiq[0].ring;
else if (pipe == AMDGPU_MES_SCHED_PIPE)
ring = &adev->mes.ring;
else
@ -974,15 +974,15 @@ static int mes_v10_1_sw_fini(void *handle)
amdgpu_ucode_release(&adev->mes.fw[pipe]);
}
amdgpu_bo_free_kernel(&adev->gfx.kiq.ring.mqd_obj,
&adev->gfx.kiq.ring.mqd_gpu_addr,
&adev->gfx.kiq.ring.mqd_ptr);
amdgpu_bo_free_kernel(&adev->gfx.kiq[0].ring.mqd_obj,
&adev->gfx.kiq[0].ring.mqd_gpu_addr,
&adev->gfx.kiq[0].ring.mqd_ptr);
amdgpu_bo_free_kernel(&adev->mes.ring.mqd_obj,
&adev->mes.ring.mqd_gpu_addr,
&adev->mes.ring.mqd_ptr);
amdgpu_ring_fini(&adev->gfx.kiq.ring);
amdgpu_ring_fini(&adev->gfx.kiq[0].ring);
amdgpu_ring_fini(&adev->mes.ring);
amdgpu_mes_fini(adev);
@ -1038,7 +1038,7 @@ static int mes_v10_1_kiq_hw_init(struct amdgpu_device *adev)
mes_v10_1_enable(adev, true);
mes_v10_1_kiq_setting(&adev->gfx.kiq.ring);
mes_v10_1_kiq_setting(&adev->gfx.kiq[0].ring);
r = mes_v10_1_queue_init(adev);
if (r)
@ -1090,7 +1090,7 @@ static int mes_v10_1_hw_init(void *handle)
* MES uses KIQ ring exclusively so driver cannot access KIQ ring
* with MES enabled.
*/
adev->gfx.kiq.ring.sched.ready = false;
adev->gfx.kiq[0].ring.sched.ready = false;
adev->mes.ring.sched.ready = true;
return 0;

drivers/gpu/drm/amd/amdgpu/mes_v11_0.c

@ -864,8 +864,8 @@ static void mes_v11_0_queue_init_register(struct amdgpu_ring *ring)
static int mes_v11_0_kiq_enable_queue(struct amdgpu_device *adev)
{
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
int r;
if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
@ -894,7 +894,7 @@ static int mes_v11_0_queue_init(struct amdgpu_device *adev,
int r;
if (pipe == AMDGPU_MES_KIQ_PIPE)
ring = &adev->gfx.kiq.ring;
ring = &adev->gfx.kiq[0].ring;
else if (pipe == AMDGPU_MES_SCHED_PIPE)
ring = &adev->mes.ring;
else
@ -961,9 +961,9 @@ static int mes_v11_0_kiq_ring_init(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
spin_lock_init(&adev->gfx.kiq.ring_lock);
spin_lock_init(&adev->gfx.kiq[0].ring_lock);
ring = &adev->gfx.kiq.ring;
ring = &adev->gfx.kiq[0].ring;
ring->me = 3;
ring->pipe = 1;
@ -989,7 +989,7 @@ static int mes_v11_0_mqd_sw_init(struct amdgpu_device *adev,
struct amdgpu_ring *ring;
if (pipe == AMDGPU_MES_KIQ_PIPE)
ring = &adev->gfx.kiq.ring;
ring = &adev->gfx.kiq[0].ring;
else if (pipe == AMDGPU_MES_SCHED_PIPE)
ring = &adev->mes.ring;
else
@ -1074,15 +1074,15 @@ static int mes_v11_0_sw_fini(void *handle)
amdgpu_ucode_release(&adev->mes.fw[pipe]);
}
amdgpu_bo_free_kernel(&adev->gfx.kiq.ring.mqd_obj,
&adev->gfx.kiq.ring.mqd_gpu_addr,
&adev->gfx.kiq.ring.mqd_ptr);
amdgpu_bo_free_kernel(&adev->gfx.kiq[0].ring.mqd_obj,
&adev->gfx.kiq[0].ring.mqd_gpu_addr,
&adev->gfx.kiq[0].ring.mqd_ptr);
amdgpu_bo_free_kernel(&adev->mes.ring.mqd_obj,
&adev->mes.ring.mqd_gpu_addr,
&adev->mes.ring.mqd_ptr);
amdgpu_ring_fini(&adev->gfx.kiq.ring);
amdgpu_ring_fini(&adev->gfx.kiq[0].ring);
amdgpu_ring_fini(&adev->mes.ring);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
@ -1175,7 +1175,7 @@ static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev)
mes_v11_0_enable(adev, true);
mes_v11_0_kiq_setting(&adev->gfx.kiq.ring);
mes_v11_0_kiq_setting(&adev->gfx.kiq[0].ring);
r = mes_v11_0_queue_init(adev, AMDGPU_MES_KIQ_PIPE);
if (r)
@ -1196,7 +1196,7 @@ static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev)
}
if (amdgpu_sriov_vf(adev)) {
mes_v11_0_kiq_dequeue(&adev->gfx.kiq.ring);
mes_v11_0_kiq_dequeue(&adev->gfx.kiq[0].ring);
mes_v11_0_kiq_clear(adev);
}
@ -1244,7 +1244,7 @@ static int mes_v11_0_hw_init(void *handle)
* MES uses KIQ ring exclusively so driver cannot access KIQ ring
* with MES enabled.
*/
adev->gfx.kiq.ring.sched.ready = false;
adev->gfx.kiq[0].ring.sched.ready = false;
adev->mes.ring.sched.ready = true;
return 0;