drm/amdgpu: Switch to SOC partition funcs

For GFX v9.4.3, use the SOC-level partition switch implementation rather
than keeping it at the GFX IP level. Change the existing GFX IP
implementation so that it no longer keeps track of the partition mode
and is restricted to the GFX-related part of the switch only.
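
Rough sketch of the resulting call flow (illustration only; the helper
name below is made up, everything else is taken from this patch, and the
locking plus KFD teardown/re-init done around the real switch are
omitted):

/* Illustrative only: a hypothetical caller requesting a compute
 * partition switch after this patch. Error handling is simplified.
 */
static int example_request_cpx(struct amdgpu_device *adev)
{
	/* The query now goes through the SOC-level XCP manager ... */
	if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr) ==
	    AMDGPU_CPX_PARTITION_MODE)
		return 0;

	/* ... and so does the switch. The XCP manager calls back into
	 * the GFX IP via gfx.funcs->switch_partition_mode(), which now
	 * only programs the GFX side and takes the number of XCCs per
	 * XCP instead of the partition mode enum.
	 */
	return amdgpu_xcp_switch_partition_mode(adev->xcp_mgr,
						AMDGPU_CPX_PARTITION_MODE);
}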

Signed-off-by: Lijo Lazar <lijo.lazar@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit 8e7fd19380 (parent e56c9ef6cb)
Authored by Lijo Lazar on 2022-11-16 17:15:47 +05:30; committed by Alex Deucher
5 changed files with 20 additions and 87 deletions


@@ -28,6 +28,7 @@
#include "amdgpu_gfx.h"
#include "amdgpu_rlc.h"
#include "amdgpu_ras.h"
#include "amdgpu_xcp.h"
/* delay 0.1 second to enable gfx off feature */
#define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(100)
@@ -1170,10 +1171,10 @@ static ssize_t amdgpu_gfx_get_current_compute_partition(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
enum amdgpu_gfx_partition mode;
int mode;
char *partition_mode;
mode = adev->gfx.funcs->query_partition_mode(adev);
mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr);
switch (mode) {
case AMDGPU_SPX_PARTITION_MODE:
@@ -1254,31 +1255,7 @@ static ssize_t amdgpu_gfx_set_compute_partition(struct device *dev,
return -EINVAL;
}
if (!adev->kfd.init_complete)
return -EPERM;
mutex_lock(&adev->gfx.partition_mutex);
if (mode == adev->gfx.funcs->query_partition_mode(adev))
goto out;
ret = amdgpu_amdkfd_check_and_lock_kfd(adev);
if (ret)
goto out;
amdgpu_amdkfd_device_fini_sw(adev);
adev->gfx.funcs->switch_partition_mode(adev, mode);
amdgpu_amdkfd_device_probe(adev);
amdgpu_amdkfd_device_init(adev);
/* If KFD init failed, return failure */
if (!adev->kfd.init_complete)
ret = -EIO;
amdgpu_amdkfd_unlock_kfd(adev);
out:
mutex_unlock(&adev->gfx.partition_mutex);
ret = amdgpu_xcp_switch_partition_mode(adev->xcp_mgr, mode);
if (ret)
return ret;


@@ -278,11 +278,7 @@ struct amdgpu_gfx_funcs {
(*query_partition_mode)(struct amdgpu_device *adev);
enum amdgpu_memory_partition
(*query_mem_partition_mode)(struct amdgpu_device *adev);
int (*switch_partition_mode)(struct amdgpu_device *adev,
enum amdgpu_gfx_partition mode);
int (*switch_gfx_partition_mode)(struct amdgpu_device *adev,
int num_xccs_per_xcp);
};
@@ -416,7 +412,6 @@ struct amdgpu_gfx {
bool cp_gfx_shadow; /* for gfx11 */
enum amdgpu_gfx_partition partition_mode;
uint16_t xcc_mask;
enum amdgpu_memory_partition mem_partition_mode;
uint32_t num_xcc_per_xcp;


@@ -307,8 +307,8 @@ static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
goto unlock;
num_xcc_per_xcp = __aqua_vanjaram_get_xcc_per_xcp(xcp_mgr, mode);
if (adev->gfx.funcs->switch_gfx_partition_mode)
adev->gfx.funcs->switch_gfx_partition_mode(xcp_mgr->adev,
if (adev->gfx.funcs->switch_partition_mode)
adev->gfx.funcs->switch_partition_mode(xcp_mgr->adev,
num_xcc_per_xcp);
if (adev->nbio.funcs->set_compute_partition_mode)


@@ -38,6 +38,7 @@
#include "gc/gc_9_4_3_sh_mask.h"
#include "gfx_v9_4_3.h"
#include "amdgpu_xcp.h"
MODULE_FIRMWARE("amdgpu/gc_9_4_3_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_3_rlc.bin");
@@ -614,61 +615,23 @@ gfx_v9_4_3_query_memory_partition(struct amdgpu_device *adev)
return mode;
}
static enum amdgpu_gfx_partition
gfx_v9_4_3_query_compute_partition(struct amdgpu_device *adev)
{
enum amdgpu_gfx_partition mode = adev->gfx.partition_mode;
if (adev->nbio.funcs->get_compute_partition_mode)
mode = adev->nbio.funcs->get_compute_partition_mode(adev);
return mode;
}
static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev,
enum amdgpu_gfx_partition mode)
int num_xccs_per_xcp)
{
int i, num_xcc;
u32 tmp = 0;
int num_xcc_per_partition, i, num_xcc;
num_xcc = NUM_XCC(adev->gfx.xcc_mask);
switch (mode) {
case AMDGPU_SPX_PARTITION_MODE:
num_xcc_per_partition = num_xcc;
break;
case AMDGPU_DPX_PARTITION_MODE:
num_xcc_per_partition = num_xcc / 2;
break;
case AMDGPU_TPX_PARTITION_MODE:
num_xcc_per_partition = num_xcc / 3;
break;
case AMDGPU_QPX_PARTITION_MODE:
num_xcc_per_partition = num_xcc / 4;
break;
case AMDGPU_CPX_PARTITION_MODE:
num_xcc_per_partition = 1;
break;
default:
return -EINVAL;
}
/* TODO:
* Stop user queues and threads, and make sure GPU is empty of work.
*/
for (i = 0; i < num_xcc; i++) {
tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP,
num_xcc_per_partition);
num_xccs_per_xcp);
tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, VIRTUAL_XCC_ID,
i % num_xcc_per_partition);
i % num_xccs_per_xcp);
WREG32_SOC15(GC, GET_INST(GC, i), regCP_HYP_XCP_CTL, tmp);
}
if (adev->nbio.funcs->set_compute_partition_mode)
adev->nbio.funcs->set_compute_partition_mode(adev, mode);
adev->gfx.num_xcc_per_xcp = num_xcc_per_partition;
adev->gfx.partition_mode = mode;
adev->gfx.num_xcc_per_xcp = num_xccs_per_xcp;
return 0;
}
@@ -680,7 +643,6 @@ static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = {
.read_wave_sgprs = &gfx_v9_4_3_read_wave_sgprs,
.read_wave_vgprs = &gfx_v9_4_3_read_wave_vgprs,
.select_me_pipe_q = &gfx_v9_4_3_select_me_pipe_q,
.query_partition_mode = &gfx_v9_4_3_query_compute_partition,
.switch_partition_mode = &gfx_v9_4_3_switch_compute_partition,
.query_mem_partition_mode = &gfx_v9_4_3_query_memory_partition,
};
@@ -1899,10 +1861,6 @@ static int gfx_v9_4_3_xcc_cp_resume(struct amdgpu_device *adev, int xcc_id)
return r;
}
if (adev->gfx.partition_mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
gfx_v9_4_3_switch_compute_partition(adev,
amdgpu_user_partt_mode);
/* set the virtual and physical id based on partition_mode */
gfx_v9_4_3_xcc_program_xcc_id(adev, xcc_id);
@@ -1931,6 +1889,9 @@ static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev)
{
int r, i, num_xcc;
if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr) == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
amdgpu_xcp_switch_partition_mode(adev->xcp_mgr, amdgpu_user_partt_mode);
num_xcc = NUM_XCC(adev->gfx.xcc_mask);
for (i = 0; i < num_xcc; i++) {
r = gfx_v9_4_3_xcc_cp_resume(adev, i);
@@ -2146,8 +2107,6 @@ static int gfx_v9_4_3_early_init(void *handle)
num_xcc = NUM_XCC(adev->gfx.xcc_mask);
adev->gfx.partition_mode = AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
AMDGPU_MAX_COMPUTE_RINGS);
gfx_v9_4_3_set_kiq_pm4_funcs(adev);


@@ -34,6 +34,7 @@
#include "kfd_smi_events.h"
#include "kfd_migrate.h"
#include "amdgpu.h"
#include "amdgpu_xcp.h"
#define MQD_SIZE_ALIGNED 768
@@ -592,7 +593,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
struct kfd_node *node;
uint32_t first_vmid_kfd, last_vmid_kfd, vmid_num_kfd;
unsigned int max_proc_per_quantum;
int num_xcd;
int num_xcd, partition_mode;
kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
KGD_ENGINE_MEC1);
@@ -644,8 +645,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
* If the VMID range changes for GFX9.4.3, then this code MUST be
* revisited.
*/
partition_mode = amdgpu_xcp_query_partition_mode(kfd->adev->xcp_mgr);
if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3) &&
kfd->adev->gfx.partition_mode == AMDGPU_CPX_PARTITION_MODE &&
partition_mode == AMDGPU_CPX_PARTITION_MODE &&
kfd->num_nodes != 1) {
vmid_num_kfd /= 2;
first_vmid_kfd = last_vmid_kfd + 1 - vmid_num_kfd*2;
@@ -761,7 +763,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
node->start_xcc_id = node->num_xcc_per_node * i;
if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3) &&
kfd->adev->gfx.partition_mode == AMDGPU_CPX_PARTITION_MODE &&
partition_mode == AMDGPU_CPX_PARTITION_MODE &&
kfd->num_nodes != 1) {
/* For GFX9.4.3 and CPX mode, first XCD gets VMID range
* 4-9 and second XCD gets VMID range 10-15.