mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-09-30 22:26:55 +00:00
drm/amdgpu: add dummy event6 for vega10
[why] A malicious mailbox event1 fails driver loading on vega10. A dummy event6 prevents the driver from taking the response from a malicious event1 as its own. [how] On vega10, send a mailbox event6 before sending event1. Signed-off-by: James Yao <yiqing.yao@amd.com> Reviewed-by: Jingwen Chen <Jingwen.Chen2@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
parent
5b0ce2d41b
commit
216a987319
3 changed files with 17 additions and 0 deletions
|
@ -727,6 +727,10 @@ void amdgpu_detect_virtualization(struct amdgpu_device *adev)
|
||||||
vi_set_virt_ops(adev);
|
vi_set_virt_ops(adev);
|
||||||
break;
|
break;
|
||||||
case CHIP_VEGA10:
|
case CHIP_VEGA10:
|
||||||
|
soc15_set_virt_ops(adev);
|
||||||
|
/* send a dummy GPU_INIT_DATA request to host on vega10 */
|
||||||
|
amdgpu_virt_request_init_data(adev);
|
||||||
|
break;
|
||||||
case CHIP_VEGA20:
|
case CHIP_VEGA20:
|
||||||
case CHIP_ARCTURUS:
|
case CHIP_ARCTURUS:
|
||||||
case CHIP_ALDEBARAN:
|
case CHIP_ALDEBARAN:
|
||||||
|
|
|
@ -180,6 +180,11 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
|
||||||
RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
|
RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
|
||||||
mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2));
|
mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2));
|
||||||
}
|
}
|
||||||
|
} else if (req == IDH_REQ_GPU_INIT_DATA){
|
||||||
|
/* Dummy REQ_GPU_INIT_DATA handling */
|
||||||
|
r = xgpu_ai_poll_msg(adev, IDH_REQ_GPU_INIT_DATA_READY);
|
||||||
|
/* version set to 0 since dummy */
|
||||||
|
adev->virt.req_init_data_ver = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -381,10 +386,16 @@ void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev)
|
||||||
amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
|
amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int xgpu_ai_request_init_data(struct amdgpu_device *adev)
|
||||||
|
{
|
||||||
|
return xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA);
|
||||||
|
}
|
||||||
|
|
||||||
const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
|
const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
|
||||||
.req_full_gpu = xgpu_ai_request_full_gpu_access,
|
.req_full_gpu = xgpu_ai_request_full_gpu_access,
|
||||||
.rel_full_gpu = xgpu_ai_release_full_gpu_access,
|
.rel_full_gpu = xgpu_ai_release_full_gpu_access,
|
||||||
.reset_gpu = xgpu_ai_request_reset,
|
.reset_gpu = xgpu_ai_request_reset,
|
||||||
.wait_reset = NULL,
|
.wait_reset = NULL,
|
||||||
.trans_msg = xgpu_ai_mailbox_trans_msg,
|
.trans_msg = xgpu_ai_mailbox_trans_msg,
|
||||||
|
.req_init_data = xgpu_ai_request_init_data,
|
||||||
};
|
};
|
||||||
|
|
|
@ -35,6 +35,7 @@ enum idh_request {
|
||||||
IDH_REQ_GPU_FINI_ACCESS,
|
IDH_REQ_GPU_FINI_ACCESS,
|
||||||
IDH_REL_GPU_FINI_ACCESS,
|
IDH_REL_GPU_FINI_ACCESS,
|
||||||
IDH_REQ_GPU_RESET_ACCESS,
|
IDH_REQ_GPU_RESET_ACCESS,
|
||||||
|
IDH_REQ_GPU_INIT_DATA,
|
||||||
|
|
||||||
IDH_LOG_VF_ERROR = 200,
|
IDH_LOG_VF_ERROR = 200,
|
||||||
IDH_READY_TO_RESET = 201,
|
IDH_READY_TO_RESET = 201,
|
||||||
|
@ -48,6 +49,7 @@ enum idh_event {
|
||||||
IDH_SUCCESS,
|
IDH_SUCCESS,
|
||||||
IDH_FAIL,
|
IDH_FAIL,
|
||||||
IDH_QUERY_ALIVE,
|
IDH_QUERY_ALIVE,
|
||||||
|
IDH_REQ_GPU_INIT_DATA_READY,
|
||||||
|
|
||||||
IDH_TEXT_MESSAGE = 255,
|
IDH_TEXT_MESSAGE = 255,
|
||||||
};
|
};
|
||||||
|
|
Loading…
Reference in a new issue