Merge branch 'drm-next-4.19' of git://people.freedesktop.org/~agd5f/linux into drm-next

Updates for 4.19.  Mostly bug fixes and cleanups.  Highlights:
- Internal API cleanup in GPU scheduler (see the sketch below the commit details)
- Decouple i2c and aux abstractions in DC
- Update maintainers
- Misc cleanups
- Misc bug fixes

Signed-off-by: Dave Airlie <airlied@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180725215326.2709-1-alexander.deucher@amd.com
Dave Airlie 2018-07-27 12:31:07 +10:00
commit 6d52aacd92
58 changed files with 1932 additions and 291 deletions
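
A minimal sketch of the entity-centric scheduler API after this cleanup, assuming a hypothetical helper (example_push_job) that simply mirrors the amdgpu_job_submit()/amdgpu_cs_submit() hunks below:

/* Hypothetical helper, for illustration only: the scheduler is no longer
 * passed around; it is implied by the entity and reached via entity->rq->sched. */
static int example_push_job(struct amdgpu_job *job,
                            struct drm_sched_entity *entity, void *owner)
{
        struct amdgpu_ring *ring;
        int r;

        r = drm_sched_job_init(&job->base, entity, owner); /* no sched argument */
        if (r)
                return r;

        drm_sched_entity_push_job(&job->base, entity);
        ring = to_amdgpu_ring(entity->rq->sched);          /* was entity->sched */
        amdgpu_ring_priority_get(ring, job->base.s_priority);
        return 0;
}

Teardown follows the same pattern: drm_sched_entity_destroy(), _flush() and _fini() now take only the entity, as in the amdgpu_ctx.c, amdgpu_ttm.c, amdgpu_uvd.c, amdgpu_vce.c and amdgpu_vm.c hunks.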

View file

@ -728,6 +728,14 @@ S: Supported
F: drivers/crypto/ccp/
F: include/linux/ccp.h
AMD DISPLAY CORE
M: Harry Wentland <harry.wentland@amd.com>
M: Leo Li <sunpeng.li@amd.com>
L: amd-gfx@lists.freedesktop.org
T: git git://people.freedesktop.org/~agd5f/linux
S: Supported
F: drivers/gpu/drm/amd/display/
AMD FAM15H PROCESSOR POWER MONITORING DRIVER
M: Huang Rui <ray.huang@amd.com>
L: linux-hwmon@vger.kernel.org
@ -777,6 +785,14 @@ F: drivers/gpu/drm/amd/include/vi_structs.h
F: drivers/gpu/drm/amd/include/v9_structs.h
F: include/uapi/linux/kfd_ioctl.h
AMD POWERPLAY
M: Rex Zhu <rex.zhu@amd.com>
M: Evan Quan <evan.quan@amd.com>
L: amd-gfx@lists.freedesktop.org
S: Supported
F: drivers/gpu/drm/amd/powerplay/
T: git git://people.freedesktop.org/~agd5f/linux
AMD SEATTLE DEVICE TREE SUPPORT
M: Brijesh Singh <brijeshkumar.singh@amd.com>
M: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
@ -4883,7 +4899,8 @@ F: Documentation/gpu/xen-front.rst
DRM TTM SUBSYSTEM
M: Christian Koenig <christian.koenig@amd.com>
M: Roger He <Hongbo.He@amd.com>
M: Huang Rui <ray.huang@amd.com>
M: Junwei Zhang <Jerry.Zhang@amd.com>
T: git git://people.freedesktop.org/~agd5f/linux
S: Maintained
L: dri-devel@lists.freedesktop.org

View file

@ -1801,8 +1801,6 @@ void amdgpu_display_update_priority(struct amdgpu_device *adev);
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
u64 num_vis_bytes);
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
void amdgpu_device_vram_location(struct amdgpu_device *adev,
struct amdgpu_gmc *mc, u64 base);
void amdgpu_device_gart_location(struct amdgpu_device *adev,

View file

@ -364,7 +364,6 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
struct acpi_bus_event *event)
{
struct amdgpu_atif *atif = adev->atif;
struct atif_sbios_requests req;
int count;
DRM_DEBUG_DRIVER("event, device_class = %s, type = %#x\n",
@ -379,42 +378,48 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
/* Not our event */
return NOTIFY_DONE;
/* Check pending SBIOS requests */
count = amdgpu_atif_get_sbios_requests(atif, &req);
if (atif->functions.sbios_requests) {
struct atif_sbios_requests req;
if (count <= 0)
return NOTIFY_DONE;
/* Check pending SBIOS requests */
count = amdgpu_atif_get_sbios_requests(atif, &req);
DRM_DEBUG_DRIVER("ATIF: %d pending SBIOS requests\n", count);
if (count <= 0)
return NOTIFY_DONE;
if (req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) {
struct amdgpu_encoder *enc = atif->encoder_for_bl;
DRM_DEBUG_DRIVER("ATIF: %d pending SBIOS requests\n", count);
if (enc) {
struct amdgpu_encoder_atom_dig *dig = enc->enc_priv;
/* todo: add DC handling */
if ((req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) &&
!amdgpu_device_has_dc_support(adev)) {
struct amdgpu_encoder *enc = atif->encoder_for_bl;
DRM_DEBUG_DRIVER("Changing brightness to %d\n",
req.backlight_level);
if (enc) {
struct amdgpu_encoder_atom_dig *dig = enc->enc_priv;
amdgpu_display_backlight_set_level(adev, enc, req.backlight_level);
DRM_DEBUG_DRIVER("Changing brightness to %d\n",
req.backlight_level);
amdgpu_display_backlight_set_level(adev, enc, req.backlight_level);
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
backlight_force_update(dig->bl_dev,
BACKLIGHT_UPDATE_HOTKEY);
backlight_force_update(dig->bl_dev,
BACKLIGHT_UPDATE_HOTKEY);
#endif
}
}
}
if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
if ((adev->flags & AMD_IS_PX) &&
amdgpu_atpx_dgpu_req_power_for_displays()) {
pm_runtime_get_sync(adev->ddev->dev);
/* Just fire off a uevent and let userspace tell us what to do */
drm_helper_hpd_irq_event(adev->ddev);
pm_runtime_mark_last_busy(adev->ddev->dev);
pm_runtime_put_autosuspend(adev->ddev->dev);
if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
if ((adev->flags & AMD_IS_PX) &&
amdgpu_atpx_dgpu_req_power_for_displays()) {
pm_runtime_get_sync(adev->ddev->dev);
/* Just fire off a uevent and let userspace tell us what to do */
drm_helper_hpd_irq_event(adev->ddev);
pm_runtime_mark_last_busy(adev->ddev->dev);
pm_runtime_put_autosuspend(adev->ddev->dev);
}
}
/* TODO: check other events */
}
/* TODO: check other events */
/* We've handled the event, stop the notifier chain. The ACPI interface
* overloads ACPI_VIDEO_NOTIFY_PROBE, we don't want to send that to

View file

@ -334,7 +334,7 @@ static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
"Called with userptr BO"))
return -EINVAL;
amdgpu_ttm_placement_from_domain(bo, domain);
amdgpu_bo_placement_from_domain(bo, domain);
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (ret)
@ -622,7 +622,7 @@ static int init_user_pages(struct kgd_mem *mem, struct mm_struct *mm,
pr_err("%s: Failed to reserve BO\n", __func__);
goto release_out;
}
amdgpu_ttm_placement_from_domain(bo, mem->domain);
amdgpu_bo_placement_from_domain(bo, mem->domain);
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (ret)
pr_err("%s: failed to validate BO\n", __func__);
@ -1680,7 +1680,7 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
if (amdgpu_bo_reserve(bo, true))
return -EAGAIN;
amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
amdgpu_bo_unreserve(bo);
if (ret) {
@ -1824,7 +1824,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
if (mem->user_pages[0]) {
amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
mem->user_pages);
amdgpu_ttm_placement_from_domain(bo, mem->domain);
amdgpu_bo_placement_from_domain(bo, mem->domain);
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (ret) {
pr_err("%s: failed to validate BO\n", __func__);

View file

@ -419,7 +419,7 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
}
retry:
amdgpu_ttm_placement_from_domain(bo, domain);
amdgpu_bo_placement_from_domain(bo, domain);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
p->bytes_moved += ctx.bytes_moved;
@ -478,7 +478,7 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
update_bytes_moved_vis =
!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
amdgpu_bo_in_cpu_visible_vram(bo);
amdgpu_ttm_placement_from_domain(bo, other);
amdgpu_bo_placement_from_domain(bo, other);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
p->bytes_moved += ctx.bytes_moved;
if (update_bytes_moved_vis)
@ -532,8 +532,8 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
/* Check if we have user pages and nobody bound the BO already */
if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
lobj->user_pages) {
amdgpu_ttm_placement_from_domain(bo,
AMDGPU_GEM_DOMAIN_CPU);
amdgpu_bo_placement_from_domain(bo,
AMDGPU_GEM_DOMAIN_CPU);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (r)
return r;
@ -1232,7 +1232,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
job = p->job;
p->job = NULL;
r = drm_sched_job_init(&job->base, &ring->sched, entity, p->filp);
r = drm_sched_job_init(&job->base, entity, p->filp);
if (r) {
amdgpu_job_free(job);
amdgpu_mn_unlock(p->mn);
@ -1262,7 +1262,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
priority = job->base.s_priority;
drm_sched_entity_push_job(&job->base, entity);
ring = to_amdgpu_ring(entity->sched);
ring = to_amdgpu_ring(entity->rq->sched);
amdgpu_ring_priority_get(ring, priority);
ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
@ -1655,7 +1655,7 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
amdgpu_ttm_placement_from_domain(*bo, (*bo)->allowed_domains);
amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
if (r)
return r;

View file

@ -104,8 +104,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
failed:
for (j = 0; j < i; j++)
drm_sched_entity_destroy(&adev->rings[j]->sched,
&ctx->rings[j].entity);
drm_sched_entity_destroy(&ctx->rings[j].entity);
kfree(ctx->fences);
ctx->fences = NULL;
return r;
@ -178,8 +177,7 @@ static void amdgpu_ctx_do_release(struct kref *ref)
if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
continue;
drm_sched_entity_destroy(&ctx->adev->rings[i]->sched,
&ctx->rings[i].entity);
drm_sched_entity_destroy(&ctx->rings[i].entity);
}
amdgpu_ctx_fini(ref);
@ -466,8 +464,8 @@ void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr)
if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
continue;
max_wait = drm_sched_entity_flush(&ctx->adev->rings[i]->sched,
&ctx->rings[i].entity, max_wait);
max_wait = drm_sched_entity_flush(&ctx->rings[i].entity,
max_wait);
}
}
mutex_unlock(&mgr->lock);
@ -492,8 +490,7 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
continue;
if (kref_read(&ctx->refcount) == 1)
drm_sched_entity_fini(&ctx->adev->rings[i]->sched,
&ctx->rings[i].entity);
drm_sched_entity_fini(&ctx->rings[i].entity);
else
DRM_ERROR("ctx %p is still alive\n", ctx);
}

View file

@ -1926,7 +1926,7 @@ static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
}
/**
* amdgpu_device_ip_suspend - run suspend for hardware IPs
* amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
*
* @adev: amdgpu_device pointer
*
@ -1936,7 +1936,55 @@ static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
* in each IP into a state suitable for suspend.
* Returns 0 on success, negative error code on failure.
*/
int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
{
int i, r;
if (amdgpu_sriov_vf(adev))
amdgpu_virt_request_full_gpu(adev, false);
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_blocks[i].status.valid)
continue;
/* displays are handled separately */
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) {
/* ungate blocks so that suspend can properly shut them down */
if (adev->ip_blocks[i].version->funcs->set_clockgating_state) {
r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
AMD_CG_STATE_UNGATE);
if (r) {
DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
}
}
/* XXX handle errors */
r = adev->ip_blocks[i].version->funcs->suspend(adev);
/* XXX handle errors */
if (r) {
DRM_ERROR("suspend of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
}
}
}
if (amdgpu_sriov_vf(adev))
amdgpu_virt_release_full_gpu(adev, false);
return 0;
}
/**
* amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
*
* @adev: amdgpu_device pointer
*
* Main suspend function for hardware IPs. The list of all the hardware
* IPs that make up the asic is walked, clockgating is disabled and the
* suspend callbacks are run. suspend puts the hardware and software state
* in each IP into a state suitable for suspend.
* Returns 0 on success, negative error code on failure.
*/
static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
{
int i, r;
@ -1957,6 +2005,9 @@ int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_blocks[i].status.valid)
continue;
/* displays are handled in phase1 */
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
continue;
/* ungate blocks so that suspend can properly shut them down */
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_SMC &&
adev->ip_blocks[i].version->funcs->set_clockgating_state) {
@ -1982,6 +2033,29 @@ int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
return 0;
}
/**
* amdgpu_device_ip_suspend - run suspend for hardware IPs
*
* @adev: amdgpu_device pointer
*
* Main suspend function for hardware IPs. The list of all the hardware
* IPs that make up the asic is walked, clockgating is disabled and the
* suspend callbacks are run. suspend puts the hardware and software state
* in each IP into a state suitable for suspend.
* Returns 0 on success, negative error code on failure.
*/
int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
{
int r;
r = amdgpu_device_ip_suspend_phase1(adev);
if (r)
return r;
r = amdgpu_device_ip_suspend_phase2(adev);
return r;
}
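/* A condensed sketch (hypothetical helper, loosely mirroring the
 * amdgpu_device_suspend() hunk later in this file) of why the split exists:
 * displays go down first, buffers are unpinned and VRAM evicted, then the
 * remaining IPs are suspended. */
static int example_suspend_order(struct amdgpu_device *adev)
{
        int r;

        r = amdgpu_device_ip_suspend_phase1(adev);  /* display (DCE) IPs only */
        if (r)
                return r;

        /* unpinning framebuffers/cursors, amdgpu_bo_evict_vram(adev) and
         * amdgpu_fence_driver_suspend(adev) happen between the two phases */

        return amdgpu_device_ip_suspend_phase2(adev); /* all remaining IPs */
}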
static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
{
int i, r;
@ -2004,7 +2078,7 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
continue;
r = block->version->funcs->hw_init(adev);
DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"successed");
DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
if (r)
return r;
}
@ -2039,7 +2113,7 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
continue;
r = block->version->funcs->hw_init(adev);
DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"successed");
DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
if (r)
return r;
}
@ -2628,6 +2702,9 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
drm_kms_helper_poll_disable(dev);
if (fbcon)
amdgpu_fbdev_set_suspend(adev, 1);
if (!amdgpu_device_has_dc_support(adev)) {
/* turn off display hw */
drm_modeset_lock_all(dev);
@ -2635,44 +2712,46 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
}
drm_modeset_unlock_all(dev);
/* unpin the front buffers and cursors */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_framebuffer *fb = crtc->primary->fb;
struct amdgpu_bo *robj;
if (amdgpu_crtc->cursor_bo) {
struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
r = amdgpu_bo_reserve(aobj, true);
if (r == 0) {
amdgpu_bo_unpin(aobj);
amdgpu_bo_unreserve(aobj);
}
}
if (fb == NULL || fb->obj[0] == NULL) {
continue;
}
robj = gem_to_amdgpu_bo(fb->obj[0]);
/* don't unpin kernel fb objects */
if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
r = amdgpu_bo_reserve(robj, true);
if (r == 0) {
amdgpu_bo_unpin(robj);
amdgpu_bo_unreserve(robj);
}
}
}
}
amdgpu_amdkfd_suspend(adev);
/* unpin the front buffers and cursors */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_framebuffer *fb = crtc->primary->fb;
struct amdgpu_bo *robj;
r = amdgpu_device_ip_suspend_phase1(adev);
if (amdgpu_crtc->cursor_bo) {
struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
r = amdgpu_bo_reserve(aobj, true);
if (r == 0) {
amdgpu_bo_unpin(aobj);
amdgpu_bo_unreserve(aobj);
}
}
if (fb == NULL || fb->obj[0] == NULL) {
continue;
}
robj = gem_to_amdgpu_bo(fb->obj[0]);
/* don't unpin kernel fb objects */
if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
r = amdgpu_bo_reserve(robj, true);
if (r == 0) {
amdgpu_bo_unpin(robj);
amdgpu_bo_unreserve(robj);
}
}
}
/* evict vram memory */
amdgpu_bo_evict_vram(adev);
amdgpu_fence_driver_suspend(adev);
r = amdgpu_device_ip_suspend(adev);
r = amdgpu_device_ip_suspend_phase2(adev);
/* evict remaining vram memory
* This second call to evict vram is to evict the gart page table
@ -2691,11 +2770,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
DRM_ERROR("amdgpu asic reset failed\n");
}
if (fbcon) {
console_lock();
amdgpu_fbdev_set_suspend(adev, 1);
console_unlock();
}
return 0;
}
@ -2720,15 +2794,12 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
if (fbcon)
console_lock();
if (resume) {
pci_set_power_state(dev->pdev, PCI_D0);
pci_restore_state(dev->pdev);
r = pci_enable_device(dev->pdev);
if (r)
goto unlock;
return r;
}
/* post card */
@ -2741,28 +2812,30 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
r = amdgpu_device_ip_resume(adev);
if (r) {
DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r);
goto unlock;
return r;
}
amdgpu_fence_driver_resume(adev);
r = amdgpu_device_ip_late_init(adev);
if (r)
goto unlock;
return r;
/* pin cursors */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
if (!amdgpu_device_has_dc_support(adev)) {
/* pin cursors */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
if (amdgpu_crtc->cursor_bo) {
struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
r = amdgpu_bo_reserve(aobj, true);
if (r == 0) {
r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
if (r != 0)
DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
amdgpu_bo_unreserve(aobj);
if (amdgpu_crtc->cursor_bo) {
struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
r = amdgpu_bo_reserve(aobj, true);
if (r == 0) {
r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
if (r != 0)
DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
amdgpu_bo_unreserve(aobj);
}
}
}
}
@ -2783,6 +2856,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
}
drm_modeset_unlock_all(dev);
}
amdgpu_fbdev_set_suspend(adev, 0);
}
drm_kms_helper_poll_enable(dev);
@ -2806,15 +2880,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
#ifdef CONFIG_PM
dev->dev->power.disable_depth--;
#endif
if (fbcon)
amdgpu_fbdev_set_suspend(adev, 0);
unlock:
if (fbcon)
console_unlock();
return r;
return 0;
}
/**
@ -3091,7 +3157,7 @@ static int amdgpu_device_handle_vram_lost(struct amdgpu_device *adev)
* @adev: amdgpu device pointer
*
* attempt to do soft-reset or full-reset and reinitialize Asic
* return 0 means successed otherwise failed
* return 0 means succeeded otherwise failed
*/
static int amdgpu_device_reset(struct amdgpu_device *adev)
{
@ -3169,7 +3235,7 @@ static int amdgpu_device_reset(struct amdgpu_device *adev)
* @from_hypervisor: request from hypervisor
*
* do VF FLR and reinitialize Asic
* return 0 means successed otherwise failed
* return 0 means succeeded otherwise failed
*/
static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
bool from_hypervisor)
@ -3294,7 +3360,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
dev_info(adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
} else {
dev_info(adev->dev, "GPU reset(%d) successed!\n",atomic_read(&adev->gpu_reset_counter));
dev_info(adev->dev, "GPU reset(%d) succeeded!\n",atomic_read(&adev->gpu_reset_counter));
}
amdgpu_vf_error_trans_all(adev);

View file

@ -373,8 +373,8 @@ void amdgpu_fbdev_fini(struct amdgpu_device *adev)
void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state)
{
if (adev->mode_info.rfbdev)
drm_fb_helper_set_suspend(&adev->mode_info.rfbdev->helper,
state);
drm_fb_helper_set_suspend_unlocked(&adev->mode_info.rfbdev->helper,
state);
}
int amdgpu_fbdev_total_size(struct amdgpu_device *adev)

View file

@ -344,7 +344,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
if (r)
goto free_pages;
amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
amdgpu_bo_unreserve(bo);
if (r)

View file

@ -133,7 +133,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
if (!f)
return -EINVAL;
r = drm_sched_job_init(&job->base, entity->sched, entity, owner);
r = drm_sched_job_init(&job->base, entity, owner);
if (r)
return r;
@ -143,7 +143,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
priority = job->base.s_priority;
drm_sched_entity_push_job(&job->base, entity);
ring = to_amdgpu_ring(entity->sched);
ring = to_amdgpu_ring(entity->rq->sched);
amdgpu_ring_priority_get(ring, priority);
return 0;
@ -167,7 +167,7 @@ int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
struct drm_sched_entity *s_entity)
{
struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->sched);
struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
struct amdgpu_job *job = to_amdgpu_job(sched_job);
struct amdgpu_vm *vm = job->vm;
struct dma_fence *fence;

View file

@ -286,7 +286,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
struct drm_crtc *crtc;
uint32_t ui32 = 0;
uint64_t ui64 = 0;
int i, j, found;
int i, found;
int ui32_size = sizeof(ui32);
if (!info->return_size || !info->return_pointer)
@ -328,64 +328,61 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
case AMDGPU_HW_IP_GFX:
type = AMD_IP_BLOCK_TYPE_GFX;
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
ring_mask |= ((adev->gfx.gfx_ring[i].ready ? 1 : 0) << i);
ring_mask |= adev->gfx.gfx_ring[i].ready << i;
ib_start_alignment = 32;
ib_size_alignment = 32;
break;
case AMDGPU_HW_IP_COMPUTE:
type = AMD_IP_BLOCK_TYPE_GFX;
for (i = 0; i < adev->gfx.num_compute_rings; i++)
ring_mask |= ((adev->gfx.compute_ring[i].ready ? 1 : 0) << i);
ring_mask |= adev->gfx.compute_ring[i].ready << i;
ib_start_alignment = 32;
ib_size_alignment = 32;
break;
case AMDGPU_HW_IP_DMA:
type = AMD_IP_BLOCK_TYPE_SDMA;
for (i = 0; i < adev->sdma.num_instances; i++)
ring_mask |= ((adev->sdma.instance[i].ring.ready ? 1 : 0) << i);
ring_mask |= adev->sdma.instance[i].ring.ready << i;
ib_start_alignment = 256;
ib_size_alignment = 4;
break;
case AMDGPU_HW_IP_UVD:
type = AMD_IP_BLOCK_TYPE_UVD;
for (i = 0; i < adev->uvd.num_uvd_inst; i++)
ring_mask |= ((adev->uvd.inst[i].ring.ready ? 1 : 0) << i);
ring_mask |= adev->uvd.inst[0].ring.ready;
ib_start_alignment = 64;
ib_size_alignment = 64;
break;
case AMDGPU_HW_IP_VCE:
type = AMD_IP_BLOCK_TYPE_VCE;
for (i = 0; i < adev->vce.num_rings; i++)
ring_mask |= ((adev->vce.ring[i].ready ? 1 : 0) << i);
ring_mask |= adev->vce.ring[i].ready << i;
ib_start_alignment = 4;
ib_size_alignment = 1;
break;
case AMDGPU_HW_IP_UVD_ENC:
type = AMD_IP_BLOCK_TYPE_UVD;
for (i = 0; i < adev->uvd.num_uvd_inst; i++)
for (j = 0; j < adev->uvd.num_enc_rings; j++)
ring_mask |=
((adev->uvd.inst[i].ring_enc[j].ready ? 1 : 0) <<
(j + i * adev->uvd.num_enc_rings));
for (i = 0; i < adev->uvd.num_enc_rings; i++)
ring_mask |=
adev->uvd.inst[0].ring_enc[i].ready << i;
ib_start_alignment = 64;
ib_size_alignment = 64;
break;
case AMDGPU_HW_IP_VCN_DEC:
type = AMD_IP_BLOCK_TYPE_VCN;
ring_mask = adev->vcn.ring_dec.ready ? 1 : 0;
ring_mask = adev->vcn.ring_dec.ready;
ib_start_alignment = 16;
ib_size_alignment = 16;
break;
case AMDGPU_HW_IP_VCN_ENC:
type = AMD_IP_BLOCK_TYPE_VCN;
for (i = 0; i < adev->vcn.num_enc_rings; i++)
ring_mask |= ((adev->vcn.ring_enc[i].ready ? 1 : 0) << i);
ring_mask |= adev->vcn.ring_enc[i].ready << i;
ib_start_alignment = 64;
ib_size_alignment = 1;
break;
case AMDGPU_HW_IP_VCN_JPEG:
type = AMD_IP_BLOCK_TYPE_VCN;
ring_mask = adev->vcn.ring_jpeg.ready ? 1 : 0;
ring_mask = adev->vcn.ring_jpeg.ready;
ib_start_alignment = 16;
ib_size_alignment = 16;
break;

View file

@ -51,7 +51,7 @@
*
*/
static bool amdgpu_need_backup(struct amdgpu_device *adev)
static bool amdgpu_bo_need_backup(struct amdgpu_device *adev)
{
if (adev->flags & AMD_IS_APU)
return false;
@ -84,12 +84,12 @@ static void amdgpu_bo_subtract_pin_size(struct amdgpu_bo *bo)
}
}
static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
if (WARN_ON_ONCE(bo->pin_count > 0))
if (bo->pin_count > 0)
amdgpu_bo_subtract_pin_size(bo);
if (bo->kfd_bo)
@ -111,7 +111,7 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
}
/**
* amdgpu_ttm_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
* amdgpu_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
* @bo: buffer object to be checked
*
* Uses destroy function associated with the object to determine if this is
@ -120,22 +120,22 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
* Returns:
* true if the object belongs to &amdgpu_bo, false if not.
*/
bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
if (bo->destroy == &amdgpu_ttm_bo_destroy)
if (bo->destroy == &amdgpu_bo_destroy)
return true;
return false;
}
/**
* amdgpu_ttm_placement_from_domain - set buffer's placement
* amdgpu_bo_placement_from_domain - set buffer's placement
* @abo: &amdgpu_bo buffer object whose placement is to be set
* @domain: requested domain
*
* Sets buffer's placement according to requested domain and the buffer's
* flags.
*/
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
struct ttm_placement *placement = &abo->placement;
@ -216,6 +216,8 @@ void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
c++;
}
BUG_ON(c >= AMDGPU_BO_MAX_PLACEMENTS);
placement->num_placement = c;
placement->placement = places;
@ -488,13 +490,13 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
#endif
bo->tbo.bdev = &adev->mman.bdev;
amdgpu_ttm_placement_from_domain(bo, bp->domain);
amdgpu_bo_placement_from_domain(bo, bp->domain);
if (bp->type == ttm_bo_type_kernel)
bo->tbo.priority = 1;
r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type,
&bo->placement, page_align, &ctx, acc_size,
NULL, bp->resv, &amdgpu_ttm_bo_destroy);
NULL, bp->resv, &amdgpu_bo_destroy);
if (unlikely(r != 0))
return r;
@ -594,7 +596,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
if (r)
return r;
if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_need_backup(adev)) {
if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_bo_need_backup(adev)) {
if (!bp->resv)
WARN_ON(reservation_object_lock((*bo_ptr)->tbo.resv,
NULL));
@ -682,7 +684,7 @@ int amdgpu_bo_validate(struct amdgpu_bo *bo)
domain = bo->preferred_domains;
retry:
amdgpu_ttm_placement_from_domain(bo, domain);
amdgpu_bo_placement_from_domain(bo, domain);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
domain = bo->allowed_domains;
@ -915,7 +917,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
/* force to pin into visible video ram */
if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
amdgpu_ttm_placement_from_domain(bo, domain);
amdgpu_bo_placement_from_domain(bo, domain);
for (i = 0; i < bo->placement.num_placement; i++) {
unsigned fpfn, lpfn;
@ -1246,7 +1248,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
struct amdgpu_bo *abo;
struct ttm_mem_reg *old_mem = &bo->mem;
if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
if (!amdgpu_bo_is_amdgpu_bo(bo))
return;
abo = ttm_to_amdgpu_bo(bo);
@ -1263,7 +1265,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
return;
/* move_notify is called before move happens */
trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}
/**
@ -1285,7 +1287,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
unsigned long offset, size;
int r;
if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
if (!amdgpu_bo_is_amdgpu_bo(bo))
return 0;
abo = ttm_to_amdgpu_bo(bo);
@ -1307,8 +1309,8 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
/* hurrah the memory is not visible ! */
atomic64_inc(&adev->num_vram_cpu_page_faults);
amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT);
amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT);
/* Avoid costly evictions; only set GTT as a busy placement */
abo->placement.num_busy_placement = 1;

View file

@ -32,6 +32,7 @@
#include "amdgpu.h"
#define AMDGPU_BO_INVALID_OFFSET LONG_MAX
#define AMDGPU_BO_MAX_PLACEMENTS 3
struct amdgpu_bo_param {
unsigned long size;
@ -77,7 +78,7 @@ struct amdgpu_bo {
/* Protected by tbo.reserved */
u32 preferred_domains;
u32 allowed_domains;
struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
struct ttm_place placements[AMDGPU_BO_MAX_PLACEMENTS];
struct ttm_placement placement;
struct ttm_buffer_object tbo;
struct ttm_bo_kmap_obj kmap;
@ -234,6 +235,9 @@ static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
}
bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
int amdgpu_bo_create(struct amdgpu_device *adev,
struct amdgpu_bo_param *bp,
struct amdgpu_bo **bo_ptr);
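/* A minimal usage sketch of the renamed helper (hypothetical function; the
 * pattern matches the amdgpu_cs.c and amdgpu_gem.c hunks in this series):
 * let amdgpu_bo_placement_from_domain() fill bo->placement, capped at
 * AMDGPU_BO_MAX_PLACEMENTS entries, then validate the move. */
static int example_move_to_gtt(struct amdgpu_bo *bo)
{
        struct ttm_operation_ctx ctx = { true, false };

        amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
        return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}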

View file

@ -323,7 +323,7 @@ static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
return ret;
if (!bo->pin_count && (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}

View file

@ -66,8 +66,6 @@ static int amdgpu_identity_map(struct amdgpu_device *adev,
u32 ring,
struct amdgpu_ring **out_ring)
{
u32 instance;
switch (mapper->hw_ip) {
case AMDGPU_HW_IP_GFX:
*out_ring = &adev->gfx.gfx_ring[ring];
@ -79,16 +77,13 @@ static int amdgpu_identity_map(struct amdgpu_device *adev,
*out_ring = &adev->sdma.instance[ring].ring;
break;
case AMDGPU_HW_IP_UVD:
instance = ring;
*out_ring = &adev->uvd.inst[instance].ring;
*out_ring = &adev->uvd.inst[0].ring;
break;
case AMDGPU_HW_IP_VCE:
*out_ring = &adev->vce.ring[ring];
break;
case AMDGPU_HW_IP_UVD_ENC:
instance = ring / adev->uvd.num_enc_rings;
*out_ring =
&adev->uvd.inst[instance].ring_enc[ring%adev->uvd.num_enc_rings];
*out_ring = &adev->uvd.inst[0].ring_enc[ring];
break;
case AMDGPU_HW_IP_VCN_DEC:
*out_ring = &adev->vcn.ring_dec;

View file

@ -436,7 +436,7 @@ TRACE_EVENT(amdgpu_cs_bo_status,
__entry->total_bo, __entry->total_size)
);
TRACE_EVENT(amdgpu_ttm_bo_move,
TRACE_EVENT(amdgpu_bo_move,
TP_PROTO(struct amdgpu_bo* bo, uint32_t new_placement, uint32_t old_placement),
TP_ARGS(bo, new_placement, old_placement),
TP_STRUCT__entry(

View file

@ -248,7 +248,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
}
/* Object isn't an AMDGPU object so ignore */
if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) {
if (!amdgpu_bo_is_amdgpu_bo(bo)) {
placement->placement = &placements;
placement->busy_placement = &placements;
placement->num_placement = 1;
@ -261,7 +261,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
case TTM_PL_VRAM:
if (!adev->mman.buffer_funcs_enabled) {
/* Move to system memory */
amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
} else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
!(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
amdgpu_bo_in_cpu_visible_vram(abo)) {
@ -271,7 +271,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
* BO will be evicted to GTT rather than causing other
* BOs to be evicted from VRAM
*/
amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT);
abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
abo->placements[0].lpfn = 0;
@ -279,12 +279,12 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
abo->placement.num_busy_placement = 1;
} else {
/* Move to GTT memory */
amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
}
break;
case TTM_PL_TT:
default:
amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
}
*placement = abo->placement;
}
@ -1925,8 +1925,9 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
return;
}
} else {
drm_sched_entity_destroy(adev->mman.entity.sched,
&adev->mman.entity);
drm_sched_entity_destroy(&adev->mman.entity);
dma_fence_put(man->move);
man->move = NULL;
}
/* this just adjusts TTM size idea, which sets lpfn to the correct value */

View file

@ -305,8 +305,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
{
int i, j;
drm_sched_entity_destroy(&adev->uvd.inst->ring.sched,
&adev->uvd.entity);
drm_sched_entity_destroy(&adev->uvd.entity);
for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
kfree(adev->uvd.inst[j].saved_bo);
@ -473,7 +472,7 @@ static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
if (cmd == 0x0 || cmd == 0x3) {
/* yes, force it into VRAM */
uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;
amdgpu_ttm_placement_from_domain(bo, domain);
amdgpu_bo_placement_from_domain(bo, domain);
}
amdgpu_uvd_force_into_uvd_segment(bo);
@ -1014,7 +1013,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
if (!ring->adev->uvd.address_64_bit) {
struct ttm_operation_ctx ctx = { true, false };
amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
amdgpu_uvd_force_into_uvd_segment(bo);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (r)

View file

@ -221,7 +221,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
if (adev->vce.vcpu_bo == NULL)
return 0;
drm_sched_entity_destroy(&adev->vce.ring[0].sched, &adev->vce.entity);
drm_sched_entity_destroy(&adev->vce.entity);
amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
(void **)&adev->vce.cpu_addr);

View file

@ -387,7 +387,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
ats_entries = 0;
}
ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
r = reservation_object_reserve_shared(bo->tbo.resv);
if (r)
@ -1113,7 +1113,7 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
struct amdgpu_ring *ring;
struct dma_fence *fence;
ring = container_of(vm->entity.sched, struct amdgpu_ring,
ring = container_of(vm->entity.rq->sched, struct amdgpu_ring,
sched);
amdgpu_ring_pad_ib(ring, params.ib);
@ -1403,7 +1403,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
addr, flags);
}
ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
nptes = last - start + 1;
@ -2642,7 +2642,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
vm->root.base.bo = NULL;
error_free_sched_entity:
drm_sched_entity_destroy(&ring->sched, &vm->entity);
drm_sched_entity_destroy(&vm->entity);
return r;
}
@ -2779,7 +2779,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
}
drm_sched_entity_destroy(vm->entity.sched, &vm->entity);
drm_sched_entity_destroy(&vm->entity);
if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
dev_err(adev->dev, "still active bo inside vm\n");

View file

@ -1476,7 +1476,7 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
tmp |= PCIE_LC_CNTL4__LC_REDO_EQ_MASK;
WREG32_PCIE(ixPCIE_LC_CNTL4, tmp);
mdelay(100);
msleep(100);
/* linkctl */
pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);

View file

@ -3490,7 +3490,7 @@ static void gfx_v9_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
/* wait for RLC_SAFE_MODE */
for (i = 0; i < adev->usec_timeout; i++) {
if (!REG_GET_FIELD(SOC15_REG_OFFSET(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
break;
udelay(1);
}
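/* Note on the fix above: the old check fed SOC15_REG_OFFSET() (a register
 * offset) to REG_GET_FIELD(), so the loop tested the offset constant instead
 * of the register contents; RREG32_SOC15() actually reads mmRLC_SAFE_MODE
 * before the CMD field is checked. */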

View file

@ -269,7 +269,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
entry->src_id, entry->ring_id, entry->vmid,
entry->pasid, task_info.process_name, task_info.tgid,
task_info.task_name, task_info.pid);
dev_err(adev->dev, " at page 0x%016llx from %d\n",
dev_err(adev->dev, " at address 0x%016llx from %d\n",
addr, entry->client_id);
if (!amdgpu_sriov_vf(adev))
dev_err(adev->dev,

View file

@ -1532,10 +1532,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
DRM_ERROR("DM: Failed to initialize IRQ\n");
goto fail;
}
/*
* Temporary disable until pplib/smu interaction is implemented
*/
dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
break;
#endif
default:
@ -1543,6 +1539,9 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
goto fail;
}
if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
return 0;
fail:
kfree(aencoder);
@ -1574,18 +1573,6 @@ static void dm_bandwidth_update(struct amdgpu_device *adev)
/* TODO: implement later */
}
static void dm_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
u8 level)
{
/* TODO: translate amdgpu_encoder to display_index and call DAL */
}
static u8 dm_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder)
{
/* TODO: translate amdgpu_encoder to display_index and call DAL */
return 0;
}
static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
struct drm_file *filp)
{
@ -1614,10 +1601,8 @@ static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
static const struct amdgpu_display_funcs dm_display_funcs = {
.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
.backlight_set_level =
dm_set_backlight_level,/* called unconditionally */
.backlight_get_level =
dm_get_backlight_level,/* called unconditionally */
.backlight_set_level = NULL, /* never called for DC */
.backlight_get_level = NULL, /* never called for DC */
.hpd_sense = NULL,/* called unconditionally */
.hpd_set_polarity = NULL, /* called unconditionally */
.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */

View file

@ -469,8 +469,8 @@ bool dm_pp_get_static_clocks(
return false;
static_clk_info->max_clocks_state = pp_to_dc_powerlevel_state(pp_clk_info.max_clocks_state);
static_clk_info->max_mclk_khz = pp_clk_info.max_memory_clock;
static_clk_info->max_sclk_khz = pp_clk_info.max_engine_clock;
static_clk_info->max_mclk_khz = pp_clk_info.max_memory_clock * 10;
static_clk_info->max_sclk_khz = pp_clk_info.max_engine_clock * 10;
return true;
}
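/* Presumably (not stated in this hunk) powerplay reports max_engine_clock and
 * max_memory_clock in 10 kHz units, so the * 10 above converts them to the kHz
 * values the max_sclk_khz/max_mclk_khz fields expect. */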

View file

@ -676,7 +676,7 @@ static void hack_force_pipe_split(struct dcn_bw_internal_vars *v,
}
static void hack_bounding_box(struct dcn_bw_internal_vars *v,
struct dc_debug *dbg,
struct dc_debug_options *dbg,
struct dc_state *context)
{
if (dbg->pipe_split_policy == MPC_SPLIT_AVOID)

View file

@ -760,7 +760,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
*/
/* deal with non-mst cases */
dp_hbr_verify_link_cap(link, &link->reported_link_cap);
dp_verify_link_cap(link, &link->reported_link_cap);
}
/* HDMI-DVI Dongle */

View file

@ -33,10 +33,8 @@
#include "include/vector.h"
#include "core_types.h"
#include "dc_link_ddc.h"
#include "i2caux/engine.h"
#include "i2caux/i2c_engine.h"
#include "i2caux/aux_engine.h"
#include "i2caux/i2caux.h"
#include "engine.h"
#include "aux_engine.h"
#define AUX_POWER_UP_WA_DELAY 500
#define I2C_OVER_AUX_DEFER_WA_DELAY 70
@ -641,9 +639,9 @@ int dc_link_aux_transfer(struct ddc_service *ddc,
enum aux_transaction_type type,
enum i2caux_transaction_action action)
{
struct i2caux *i2caux = ddc->ctx->i2caux;
struct ddc *ddc_pin = ddc->ddc_pin;
struct aux_engine *engine;
struct engine *engine;
struct aux_engine *aux_engine;
enum aux_channel_operation_result operation_result;
struct aux_request_transaction_data aux_req;
struct aux_reply_transaction_data aux_rep;
@ -654,7 +652,8 @@ int dc_link_aux_transfer(struct ddc_service *ddc,
memset(&aux_req, 0, sizeof(aux_req));
memset(&aux_rep, 0, sizeof(aux_rep));
engine = i2caux->funcs->acquire_aux_engine(i2caux, ddc_pin);
engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
aux_engine = engine->funcs->acquire(engine, ddc_pin);
aux_req.type = type;
aux_req.action = action;
@ -664,15 +663,15 @@ int dc_link_aux_transfer(struct ddc_service *ddc,
aux_req.length = size;
aux_req.data = buffer;
engine->funcs->submit_channel_request(engine, &aux_req);
operation_result = engine->funcs->get_channel_status(engine, &returned_bytes);
aux_engine->funcs->submit_channel_request(aux_engine, &aux_req);
operation_result = aux_engine->funcs->get_channel_status(aux_engine, &returned_bytes);
switch (operation_result) {
case AUX_CHANNEL_OPERATION_SUCCEEDED:
res = returned_bytes;
if (res <= size && res >= 0)
res = engine->funcs->read_channel_reply(engine, size,
res = aux_engine->funcs->read_channel_reply(aux_engine, size,
buffer, reply,
&status);
@ -686,8 +685,7 @@ int dc_link_aux_transfer(struct ddc_service *ddc,
res = -1;
break;
}
i2caux->funcs->release_engine(i2caux, &engine->base);
aux_engine->base.funcs->release_engine(&aux_engine->base);
return res;
}
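/* This is the "decouple i2c and aux abstractions" highlight in practice:
 * instead of going through the context-wide i2caux object
 * (i2caux->funcs->acquire_aux_engine), the DDC pin now indexes an engine
 * straight out of the resource pool (res_pool->engines[ddc_pin->pin_data->en]),
 * acquires the aux_engine through engine->funcs->acquire(), and releases it
 * via aux_engine->base.funcs->release_engine(). */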

View file

@ -1029,7 +1029,7 @@ enum link_training_result dc_link_dp_perform_link_training(
lt_settings.lane_settings[0].PRE_EMPHASIS);
if (status != LINK_TRAINING_SUCCESS)
link->ctx->dc->debug.debug_data.ltFailCount++;
link->ctx->dc->debug_data.ltFailCount++;
return status;
}
@ -1086,7 +1086,7 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link)
return max_link_cap;
}
bool dp_hbr_verify_link_cap(
bool dp_verify_link_cap(
struct dc_link *link,
struct dc_link_settings *known_limit_link_setting)
{
@ -1101,6 +1101,11 @@ bool dp_hbr_verify_link_cap(
enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_EXTERNAL;
enum link_training_result status;
if (link->dc->debug.skip_detection_link_training) {
link->verified_link_cap = *known_limit_link_setting;
return true;
}
success = false;
skip_link_training = false;

View file

@ -38,7 +38,7 @@
#include "inc/compressor.h"
#include "dml/display_mode_lib.h"
#define DC_VER "3.1.56"
#define DC_VER "3.1.58"
#define MAX_SURFACES 3
#define MAX_STREAMS 6
@ -207,7 +207,7 @@ struct dc_clocks {
int phyclk_khz;
};
struct dc_debug {
struct dc_debug_options {
enum visual_confirm visual_confirm;
bool sanity_checks;
bool max_disp_clk;
@ -258,13 +258,16 @@ struct dc_debug {
bool avoid_vbios_exec_table;
bool scl_reset_length10;
bool hdmi20_disable;
struct {
uint32_t ltFailCount;
uint32_t i2cErrorCount;
uint32_t auxErrorCount;
} debug_data;
bool skip_detection_link_training;
};
struct dc_debug_data {
uint32_t ltFailCount;
uint32_t i2cErrorCount;
uint32_t auxErrorCount;
};
struct dc_state;
struct resource_pool;
struct dce_hwseq;
@ -273,8 +276,7 @@ struct dc {
struct dc_caps caps;
struct dc_cap_funcs cap_funcs;
struct dc_config config;
struct dc_debug debug;
struct dc_debug_options debug;
struct dc_context *ctx;
uint8_t link_count;
@ -310,6 +312,8 @@ struct dc {
/* FBC compressor */
struct compressor *fbc_compressor;
struct dc_debug_data debug_data;
};
enum frame_buffer_mode {

View file

@ -192,7 +192,7 @@ enum surface_pixel_format {
/*swaped & float*/
SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F,
/*grow graphics here if necessary */
SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888,
SURFACE_PIXEL_FORMAT_VIDEO_BEGIN,
SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr =
SURFACE_PIXEL_FORMAT_VIDEO_BEGIN,

View file

@ -28,7 +28,7 @@
DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \
dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o \
dce_clocks.o dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o
dce_clocks.o dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o
AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE))

View file

@ -0,0 +1,942 @@
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "dce_aux.h"
#include "dce/dce_11_0_sh_mask.h"
#define CTX \
aux110->base.base.ctx
#define REG(reg_name)\
(aux110->regs->reg_name)
#define DC_LOGGER \
engine->base.ctx->logger
#include "reg_helper.h"
#define FROM_AUX_ENGINE(ptr) \
container_of((ptr), struct aux_engine_dce110, base)
#define FROM_ENGINE(ptr) \
FROM_AUX_ENGINE(container_of((ptr), struct aux_engine, base))
#define FROM_AUX_ENGINE_ENGINE(ptr) \
container_of((ptr), struct aux_engine, base)
enum {
AUX_INVALID_REPLY_RETRY_COUNTER = 1,
AUX_TIMED_OUT_RETRY_COUNTER = 2,
AUX_DEFER_RETRY_COUNTER = 6
};
static void release_engine(
struct engine *engine)
{
struct aux_engine_dce110 *aux110 = FROM_ENGINE(engine);
dal_ddc_close(engine->ddc);
engine->ddc = NULL;
REG_UPDATE(AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, 1);
}
#define SW_CAN_ACCESS_AUX 1
#define DMCU_CAN_ACCESS_AUX 2
static bool is_engine_available(
struct aux_engine *engine)
{
struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
uint32_t value = REG_READ(AUX_ARB_CONTROL);
uint32_t field = get_reg_field_value(
value,
AUX_ARB_CONTROL,
AUX_REG_RW_CNTL_STATUS);
return (field != DMCU_CAN_ACCESS_AUX);
}
static bool acquire_engine(
struct aux_engine *engine)
{
struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
uint32_t value = REG_READ(AUX_ARB_CONTROL);
uint32_t field = get_reg_field_value(
value,
AUX_ARB_CONTROL,
AUX_REG_RW_CNTL_STATUS);
if (field == DMCU_CAN_ACCESS_AUX)
return false;
/* enable AUX before requesting SW access to AUX */
value = REG_READ(AUX_CONTROL);
field = get_reg_field_value(value,
AUX_CONTROL,
AUX_EN);
if (field == 0) {
set_reg_field_value(
value,
1,
AUX_CONTROL,
AUX_EN);
if (REG(AUX_RESET_MASK)) {
/*DP_AUX block as part of the enable sequence*/
set_reg_field_value(
value,
1,
AUX_CONTROL,
AUX_RESET);
}
REG_WRITE(AUX_CONTROL, value);
if (REG(AUX_RESET_MASK)) {
/* poll HW to make sure the reset is done */
REG_WAIT(AUX_CONTROL, AUX_RESET_DONE, 1,
1, 11);
set_reg_field_value(
value,
0,
AUX_CONTROL,
AUX_RESET);
REG_WRITE(AUX_CONTROL, value);
REG_WAIT(AUX_CONTROL, AUX_RESET_DONE, 0,
1, 11);
}
} /*if (field)*/
/* request SW to access AUX */
REG_UPDATE(AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, 1);
value = REG_READ(AUX_ARB_CONTROL);
field = get_reg_field_value(
value,
AUX_ARB_CONTROL,
AUX_REG_RW_CNTL_STATUS);
return (field == SW_CAN_ACCESS_AUX);
}
#define COMPOSE_AUX_SW_DATA_16_20(command, address) \
((command) | ((0xF0000 & (address)) >> 16))
#define COMPOSE_AUX_SW_DATA_8_15(address) \
((0xFF00 & (address)) >> 8)
#define COMPOSE_AUX_SW_DATA_0_7(address) \
(0xFF & (address))
static void submit_channel_request(
struct aux_engine *engine,
struct aux_request_transaction_data *request)
{
struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
uint32_t value;
uint32_t length;
bool is_write =
((request->type == AUX_TRANSACTION_TYPE_DP) &&
(request->action == I2CAUX_TRANSACTION_ACTION_DP_WRITE)) ||
((request->type == AUX_TRANSACTION_TYPE_I2C) &&
((request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE) ||
(request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT)));
if (REG(AUXN_IMPCAL)) {
/* clear_aux_error */
REG_UPDATE_SEQ(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK,
1,
0);
REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK,
1,
0);
/* force_default_calibrate */
REG_UPDATE_1BY1_2(AUXN_IMPCAL,
AUXN_IMPCAL_ENABLE, 1,
AUXN_IMPCAL_OVERRIDE_ENABLE, 0);
/* bug? why AUXN update EN and OVERRIDE_EN 1 by 1 while AUX P toggles OVERRIDE? */
REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE,
1,
0);
}
/* set the delay and the number of bytes to write */
/* The length includes
* the 4-bit header and the 20-bit address
* (that is, 3 bytes).
* If the requested length is non-zero, an
* additional byte specifying the length is required.
*/
length = request->length ? 4 : 3;
if (is_write)
length += request->length;
REG_UPDATE_2(AUX_SW_CONTROL,
AUX_SW_START_DELAY, request->delay,
AUX_SW_WR_BYTES, length);
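/* Worked example (hypothetical values): a 16-byte I2C-over-AUX write gives
 * length = 4 (header/address plus the length byte) + 16 bytes of payload = 20,
 * which is what is programmed into AUX_SW_WR_BYTES above; a zero-length
 * request programs only the 3 header/address bytes. */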
/* program action and address and payload data (if 'is_write') */
value = REG_UPDATE_4(AUX_SW_DATA,
AUX_SW_INDEX, 0,
AUX_SW_DATA_RW, 0,
AUX_SW_AUTOINCREMENT_DISABLE, 1,
AUX_SW_DATA, COMPOSE_AUX_SW_DATA_16_20(request->action, request->address));
value = REG_SET_2(AUX_SW_DATA, value,
AUX_SW_AUTOINCREMENT_DISABLE, 0,
AUX_SW_DATA, COMPOSE_AUX_SW_DATA_8_15(request->address));
value = REG_SET(AUX_SW_DATA, value,
AUX_SW_DATA, COMPOSE_AUX_SW_DATA_0_7(request->address));
if (request->length) {
value = REG_SET(AUX_SW_DATA, value,
AUX_SW_DATA, request->length - 1);
}
if (is_write) {
/* Load the HW buffer with the data to be sent.
* This is only relevant for write operations.
* For reads, the received data will be
* processed in process_channel_reply().
*/
uint32_t i = 0;
while (i < request->length) {
value = REG_SET(AUX_SW_DATA, value,
AUX_SW_DATA, request->data[i]);
++i;
}
}
REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1);
REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0,
10, aux110->timeout_period/10);
REG_UPDATE(AUX_SW_CONTROL, AUX_SW_GO, 1);
}
static int read_channel_reply(struct aux_engine *engine, uint32_t size,
uint8_t *buffer, uint8_t *reply_result,
uint32_t *sw_status)
{
struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
uint32_t bytes_replied;
uint32_t reply_result_32;
*sw_status = REG_GET(AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT,
&bytes_replied);
/* In case HPD is LOW, exit AUX transaction */
if ((*sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
return -1;
/* Need at least the status byte */
if (!bytes_replied)
return -1;
REG_UPDATE_1BY1_3(AUX_SW_DATA,
AUX_SW_INDEX, 0,
AUX_SW_AUTOINCREMENT_DISABLE, 1,
AUX_SW_DATA_RW, 1);
REG_GET(AUX_SW_DATA, AUX_SW_DATA, &reply_result_32);
reply_result_32 = reply_result_32 >> 4;
*reply_result = (uint8_t)reply_result_32;
if (reply_result_32 == 0) { /* ACK */
uint32_t i = 0;
/* First byte was already used to get the command status */
--bytes_replied;
/* Do not overflow buffer */
if (bytes_replied > size)
return -1;
while (i < bytes_replied) {
uint32_t aux_sw_data_val;
REG_GET(AUX_SW_DATA, AUX_SW_DATA, &aux_sw_data_val);
buffer[i] = aux_sw_data_val;
++i;
}
return i;
}
return 0;
}
static void process_channel_reply(
struct aux_engine *engine,
struct aux_reply_transaction_data *reply)
{
int bytes_replied;
uint8_t reply_result;
uint32_t sw_status;
bytes_replied = read_channel_reply(engine, reply->length, reply->data,
&reply_result, &sw_status);
/* in case HPD is LOW, exit AUX transaction */
if ((sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) {
reply->status = AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON;
return;
}
if (bytes_replied < 0) {
/* Need to handle an error case...
* Hopefully, the upper-layer function won't call this function if
* the number of bytes in the reply was 0, because an error should
* already have been asserted and handled; this can happen in the
* hot-plug case.
*/
if (!(sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) {
reply->status = AUX_TRANSACTION_REPLY_INVALID;
ASSERT_CRITICAL(false);
return;
}
} else {
switch (reply_result) {
case 0: /* ACK */
reply->status = AUX_TRANSACTION_REPLY_AUX_ACK;
break;
case 1: /* NACK */
reply->status = AUX_TRANSACTION_REPLY_AUX_NACK;
break;
case 2: /* DEFER */
reply->status = AUX_TRANSACTION_REPLY_AUX_DEFER;
break;
case 4: /* AUX ACK / I2C NACK */
reply->status = AUX_TRANSACTION_REPLY_I2C_NACK;
break;
case 8: /* AUX ACK / I2C DEFER */
reply->status = AUX_TRANSACTION_REPLY_I2C_DEFER;
break;
default:
reply->status = AUX_TRANSACTION_REPLY_INVALID;
}
}
}
static enum aux_channel_operation_result get_channel_status(
struct aux_engine *engine,
uint8_t *returned_bytes)
{
struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
uint32_t value;
if (returned_bytes == NULL) {
/* caller passed a NULL pointer */
ASSERT_CRITICAL(false);
return AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN;
}
*returned_bytes = 0;
/* poll to make sure that SW_DONE is asserted */
value = REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 1,
10, aux110->timeout_period/10);
/* in case HPD is LOW, exit AUX transaction */
if ((value & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
return AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON;
/* Note that the following bits are set in 'status.bits'
* during CTS 4.2.1.2 (FW 3.3.1):
* AUX_SW_RX_MIN_COUNT_VIOL, AUX_SW_RX_INVALID_STOP,
* AUX_SW_RX_RECV_NO_DET, AUX_SW_RX_RECV_INVALID_H.
*
* AUX_SW_RX_MIN_COUNT_VIOL is an internal,
* HW debugging bit and should be ignored.
*/
if (value & AUX_SW_STATUS__AUX_SW_DONE_MASK) {
if ((value & AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE_MASK) ||
(value & AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_MASK))
return AUX_CHANNEL_OPERATION_FAILED_TIMEOUT;
else if ((value & AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP_MASK) ||
(value & AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET_MASK) ||
(value &
AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H_MASK) ||
(value & AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L_MASK))
return AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY;
*returned_bytes = get_reg_field_value(value,
AUX_SW_STATUS,
AUX_SW_REPLY_BYTE_COUNT);
if (*returned_bytes == 0)
return
AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY;
else {
*returned_bytes -= 1;
return AUX_CHANNEL_OPERATION_SUCCEEDED;
}
} else {
/*time_elapsed >= aux_engine->timeout_period
* AUX_SW_STATUS__AUX_SW_HPD_DISCON = at this point
*/
ASSERT_CRITICAL(false);
return AUX_CHANNEL_OPERATION_FAILED_TIMEOUT;
}
}
static void process_read_reply(
struct aux_engine *engine,
struct read_command_context *ctx)
{
engine->funcs->process_channel_reply(engine, &ctx->reply);
switch (ctx->reply.status) {
case AUX_TRANSACTION_REPLY_AUX_ACK:
ctx->defer_retry_aux = 0;
if (ctx->returned_byte > ctx->current_read_length) {
ctx->status =
I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
ctx->operation_succeeded = false;
} else if (ctx->returned_byte < ctx->current_read_length) {
ctx->current_read_length -= ctx->returned_byte;
ctx->offset += ctx->returned_byte;
++ctx->invalid_reply_retry_aux_on_ack;
if (ctx->invalid_reply_retry_aux_on_ack >
AUX_INVALID_REPLY_RETRY_COUNTER) {
ctx->status =
I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
ctx->operation_succeeded = false;
}
} else {
ctx->status = I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
ctx->transaction_complete = true;
ctx->operation_succeeded = true;
}
break;
case AUX_TRANSACTION_REPLY_AUX_NACK:
ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_NACK;
ctx->operation_succeeded = false;
break;
case AUX_TRANSACTION_REPLY_AUX_DEFER:
++ctx->defer_retry_aux;
if (ctx->defer_retry_aux > AUX_DEFER_RETRY_COUNTER) {
ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
ctx->operation_succeeded = false;
}
break;
case AUX_TRANSACTION_REPLY_I2C_DEFER:
ctx->defer_retry_aux = 0;
++ctx->defer_retry_i2c;
if (ctx->defer_retry_i2c > AUX_DEFER_RETRY_COUNTER) {
ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
ctx->operation_succeeded = false;
}
break;
case AUX_TRANSACTION_REPLY_HPD_DISCON:
ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
ctx->operation_succeeded = false;
break;
default:
ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
ctx->operation_succeeded = false;
}
}
static void process_read_request(
struct aux_engine *engine,
struct read_command_context *ctx)
{
enum aux_channel_operation_result operation_result;
engine->funcs->submit_channel_request(engine, &ctx->request);
operation_result = engine->funcs->get_channel_status(
engine, &ctx->returned_byte);
switch (operation_result) {
case AUX_CHANNEL_OPERATION_SUCCEEDED:
if (ctx->returned_byte > ctx->current_read_length) {
ctx->status =
I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
ctx->operation_succeeded = false;
} else {
ctx->timed_out_retry_aux = 0;
ctx->invalid_reply_retry_aux = 0;
ctx->reply.length = ctx->returned_byte;
ctx->reply.data = ctx->buffer;
process_read_reply(engine, ctx);
}
break;
case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
++ctx->invalid_reply_retry_aux;
if (ctx->invalid_reply_retry_aux >
AUX_INVALID_REPLY_RETRY_COUNTER) {
ctx->status =
I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
ctx->operation_succeeded = false;
} else
udelay(400);
break;
case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
++ctx->timed_out_retry_aux;
if (ctx->timed_out_retry_aux > AUX_TIMED_OUT_RETRY_COUNTER) {
ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
ctx->operation_succeeded = false;
} else {
/* DP 1.2a, table 2-58:
* "S3: AUX Request CMD PENDING:
* retry 3 times, with 400usec wait on each"
* The HW timeout is set to 550usec,
* so we should not wait here
*/
}
break;
case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
ctx->operation_succeeded = false;
break;
default:
ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
ctx->operation_succeeded = false;
}
}
static bool read_command(
struct aux_engine *engine,
struct i2caux_transaction_request *request,
bool middle_of_transaction)
{
struct read_command_context ctx;
ctx.buffer = request->payload.data;
ctx.current_read_length = request->payload.length;
ctx.offset = 0;
ctx.timed_out_retry_aux = 0;
ctx.invalid_reply_retry_aux = 0;
ctx.defer_retry_aux = 0;
ctx.defer_retry_i2c = 0;
ctx.invalid_reply_retry_aux_on_ack = 0;
ctx.transaction_complete = false;
ctx.operation_succeeded = true;
if (request->payload.address_space ==
I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
ctx.request.type = AUX_TRANSACTION_TYPE_DP;
ctx.request.action = I2CAUX_TRANSACTION_ACTION_DP_READ;
ctx.request.address = request->payload.address;
} else if (request->payload.address_space ==
I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C) {
ctx.request.type = AUX_TRANSACTION_TYPE_I2C;
ctx.request.action = middle_of_transaction ?
I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT :
I2CAUX_TRANSACTION_ACTION_I2C_READ;
ctx.request.address = request->payload.address >> 1;
} else {
/* in DAL2, there was no return in such case */
BREAK_TO_DEBUGGER();
return false;
}
ctx.request.delay = 0;
do {
memset(ctx.buffer + ctx.offset, 0, ctx.current_read_length);
ctx.request.data = ctx.buffer + ctx.offset;
ctx.request.length = ctx.current_read_length;
process_read_request(engine, &ctx);
request->status = ctx.status;
if (ctx.operation_succeeded && !ctx.transaction_complete)
if (ctx.request.type == AUX_TRANSACTION_TYPE_I2C)
msleep(engine->delay);
} while (ctx.operation_succeeded && !ctx.transaction_complete);
if (request->payload.address_space ==
I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
DC_LOG_I2C_AUX("READ: addr:0x%x value:0x%x Result:%d",
request->payload.address,
request->payload.data[0],
ctx.operation_succeeded);
}
return ctx.operation_succeeded;
}
static void process_write_reply(
struct aux_engine *engine,
struct write_command_context *ctx)
{
engine->funcs->process_channel_reply(engine, &ctx->reply);
switch (ctx->reply.status) {
case AUX_TRANSACTION_REPLY_AUX_ACK:
ctx->operation_succeeded = true;
if (ctx->returned_byte) {
ctx->request.action = ctx->mot ?
I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT :
I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST;
ctx->current_write_length = 0;
++ctx->ack_m_retry;
if (ctx->ack_m_retry > AUX_DEFER_RETRY_COUNTER) {
ctx->status =
I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
ctx->operation_succeeded = false;
} else
udelay(300);
} else {
ctx->status = I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
ctx->defer_retry_aux = 0;
ctx->ack_m_retry = 0;
ctx->transaction_complete = true;
}
break;
case AUX_TRANSACTION_REPLY_AUX_NACK:
ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_NACK;
ctx->operation_succeeded = false;
break;
case AUX_TRANSACTION_REPLY_AUX_DEFER:
++ctx->defer_retry_aux;
if (ctx->defer_retry_aux > ctx->max_defer_retry) {
ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
ctx->operation_succeeded = false;
}
break;
case AUX_TRANSACTION_REPLY_I2C_DEFER:
ctx->defer_retry_aux = 0;
ctx->current_write_length = 0;
ctx->request.action = ctx->mot ?
I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT :
I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST;
++ctx->defer_retry_i2c;
if (ctx->defer_retry_i2c > ctx->max_defer_retry) {
ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
ctx->operation_succeeded = false;
}
break;
case AUX_TRANSACTION_REPLY_HPD_DISCON:
ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
ctx->operation_succeeded = false;
break;
default:
ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
ctx->operation_succeeded = false;
}
}
static void process_write_request(
struct aux_engine *engine,
struct write_command_context *ctx)
{
enum aux_channel_operation_result operation_result;
engine->funcs->submit_channel_request(engine, &ctx->request);
operation_result = engine->funcs->get_channel_status(
engine, &ctx->returned_byte);
switch (operation_result) {
case AUX_CHANNEL_OPERATION_SUCCEEDED:
ctx->timed_out_retry_aux = 0;
ctx->invalid_reply_retry_aux = 0;
ctx->reply.length = ctx->returned_byte;
ctx->reply.data = ctx->reply_data;
process_write_reply(engine, ctx);
break;
case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
++ctx->invalid_reply_retry_aux;
if (ctx->invalid_reply_retry_aux >
AUX_INVALID_REPLY_RETRY_COUNTER) {
ctx->status =
I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
ctx->operation_succeeded = false;
} else
udelay(400);
break;
case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
++ctx->timed_out_retry_aux;
if (ctx->timed_out_retry_aux > AUX_TIMED_OUT_RETRY_COUNTER) {
ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
ctx->operation_succeeded = false;
} else {
/* DP 1.2a, table 2-58:
* "S3: AUX Request CMD PENDING:
* retry 3 times, with 400usec wait on each"
* The HW timeout is set to 550usec,
* so we should not wait here
*/
}
break;
case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
ctx->operation_succeeded = false;
break;
default:
ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
ctx->operation_succeeded = false;
}
}
static bool write_command(
struct aux_engine *engine,
struct i2caux_transaction_request *request,
bool middle_of_transaction)
{
struct write_command_context ctx;
ctx.mot = middle_of_transaction;
ctx.buffer = request->payload.data;
ctx.current_write_length = request->payload.length;
ctx.timed_out_retry_aux = 0;
ctx.invalid_reply_retry_aux = 0;
ctx.defer_retry_aux = 0;
ctx.defer_retry_i2c = 0;
ctx.ack_m_retry = 0;
ctx.transaction_complete = false;
ctx.operation_succeeded = true;
if (request->payload.address_space ==
I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
ctx.request.type = AUX_TRANSACTION_TYPE_DP;
ctx.request.action = I2CAUX_TRANSACTION_ACTION_DP_WRITE;
ctx.request.address = request->payload.address;
} else if (request->payload.address_space ==
I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C) {
ctx.request.type = AUX_TRANSACTION_TYPE_I2C;
ctx.request.action = middle_of_transaction ?
I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT :
I2CAUX_TRANSACTION_ACTION_I2C_WRITE;
ctx.request.address = request->payload.address >> 1;
} else {
/* in DAL2, there was no return in such case */
BREAK_TO_DEBUGGER();
return false;
}
ctx.request.delay = 0;
ctx.max_defer_retry =
(engine->max_defer_write_retry > AUX_DEFER_RETRY_COUNTER) ?
engine->max_defer_write_retry : AUX_DEFER_RETRY_COUNTER;
do {
ctx.request.data = ctx.buffer;
ctx.request.length = ctx.current_write_length;
process_write_request(engine, &ctx);
request->status = ctx.status;
if (ctx.operation_succeeded && !ctx.transaction_complete)
if (ctx.request.type == AUX_TRANSACTION_TYPE_I2C)
msleep(engine->delay);
} while (ctx.operation_succeeded && !ctx.transaction_complete);
if (request->payload.address_space ==
I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
DC_LOG_I2C_AUX("WRITE: addr:0x%x value:0x%x Result:%d",
request->payload.address,
request->payload.data[0],
ctx.operation_succeeded);
}
return ctx.operation_succeeded;
}
static bool end_of_transaction_command(
struct aux_engine *engine,
struct i2caux_transaction_request *request)
{
struct i2caux_transaction_request dummy_request;
uint8_t dummy_data;
/* [tcheng] We only need to send the stop (read with MOT = 0)
* for I2C-over-Aux, not native AUX
*/
if (request->payload.address_space !=
I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C)
return false;
dummy_request.operation = request->operation;
dummy_request.payload.address_space = request->payload.address_space;
dummy_request.payload.address = request->payload.address;
/*
* Add a dummy byte due to some receiver quirk
* where one byte is sent along with MOT = 0.
* Ideally this should be 0.
*/
dummy_request.payload.length = 0;
dummy_request.payload.data = &dummy_data;
if (request->operation == I2CAUX_TRANSACTION_READ)
return read_command(engine, &dummy_request, false);
else
return write_command(engine, &dummy_request, false);
/* According to Syed, DoDummyMOT is not needed now */
}
bool submit_request(
struct engine *engine,
struct i2caux_transaction_request *request,
bool middle_of_transaction)
{
struct aux_engine *aux_engine = FROM_AUX_ENGINE_ENGINE(engine);
bool result;
bool mot_used = true;
switch (request->operation) {
case I2CAUX_TRANSACTION_READ:
result = read_command(aux_engine, request, mot_used);
break;
case I2CAUX_TRANSACTION_WRITE:
result = write_command(aux_engine, request, mot_used);
break;
default:
result = false;
}
/* [tcheng]
* Need to send a stop for the last transaction to free up the AUX;
* if the above command fails, this is treated as the last transaction.
*/
if (!middle_of_transaction || !result)
end_of_transaction_command(aux_engine, request);
/* mask AUX interrupt */
return result;
}
enum i2caux_engine_type get_engine_type(
const struct engine *engine)
{
return I2CAUX_ENGINE_TYPE_AUX;
}
static struct aux_engine *acquire(
struct engine *engine,
struct ddc *ddc)
{
struct aux_engine *aux_engine = FROM_AUX_ENGINE_ENGINE(engine);
enum gpio_result result;
if (aux_engine->funcs->is_engine_available) {
/*check whether SW could use the engine*/
if (!aux_engine->funcs->is_engine_available(aux_engine))
return NULL;
}
result = dal_ddc_open(ddc, GPIO_MODE_HARDWARE,
GPIO_DDC_CONFIG_TYPE_MODE_AUX);
if (result != GPIO_RESULT_OK)
return NULL;
if (!aux_engine->funcs->acquire_engine(aux_engine)) {
dal_ddc_close(ddc);
return NULL;
}
engine->ddc = ddc;
return aux_engine;
}
static const struct aux_engine_funcs aux_engine_funcs = {
.acquire_engine = acquire_engine,
.submit_channel_request = submit_channel_request,
.process_channel_reply = process_channel_reply,
.read_channel_reply = read_channel_reply,
.get_channel_status = get_channel_status,
.is_engine_available = is_engine_available,
};
static const struct engine_funcs engine_funcs = {
.release_engine = release_engine,
.destroy_engine = dce110_engine_destroy,
.submit_request = submit_request,
.get_engine_type = get_engine_type,
.acquire = acquire,
};
void dce110_engine_destroy(struct engine **engine)
{
struct aux_engine_dce110 *engine110 = FROM_ENGINE(*engine);
kfree(engine110);
*engine = NULL;
}
struct aux_engine *dce110_aux_engine_construct(struct aux_engine_dce110 *aux_engine110,
struct dc_context *ctx,
uint32_t inst,
uint32_t timeout_period,
const struct dce110_aux_registers *regs)
{
aux_engine110->base.base.ddc = NULL;
aux_engine110->base.base.ctx = ctx;
aux_engine110->base.delay = 0;
aux_engine110->base.max_defer_write_retry = 0;
aux_engine110->base.base.funcs = &engine_funcs;
aux_engine110->base.funcs = &aux_engine_funcs;
aux_engine110->base.base.inst = inst;
aux_engine110->timeout_period = timeout_period;
aux_engine110->regs = regs;
return &aux_engine110->base;
}
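For context on how the MOT plumbing above is meant to be driven: a caller splits an I2C-over-AUX access into several submit_request() calls, passing middle_of_transaction = true for every segment except the last; the final (or a failed) call makes submit_request() issue the stop via end_of_transaction_command(). A minimal sketch of such a caller, not part of this patch (the helper name and the reduced error handling are made up; the request types come from the engine header further below):

static bool i2c_over_aux_reg_read_example(struct engine *engine,
		uint32_t i2c_addr, uint8_t reg, uint8_t *value)
{
	/* segment 1: write the register index, keep the bus claimed (MOT = 1) */
	struct i2caux_transaction_request wr = {
		.operation = I2CAUX_TRANSACTION_WRITE,
		.payload = {
			.address_space = I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C,
			.address = i2c_addr,
			.length = 1,
			.data = &reg,
		},
	};
	/* segment 2: read one byte back from the same device */
	struct i2caux_transaction_request rd = {
		.operation = I2CAUX_TRANSACTION_READ,
		.payload = {
			.address_space = I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C,
			.address = i2c_addr,
			.length = 1,
			.data = value,
		},
	};

	if (!submit_request(engine, &wr, true))
		return false;

	/* last segment: submit_request() sends the stop (MOT = 0) afterwards */
	return submit_request(engine, &rd, false);
}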

View file

@@ -0,0 +1,111 @@
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef __DAL_AUX_ENGINE_DCE110_H__
#define __DAL_AUX_ENGINE_DCE110_H__
#include "aux_engine.h"
#define AUX_COMMON_REG_LIST(id)\
SRI(AUX_CONTROL, DP_AUX, id), \
SRI(AUX_ARB_CONTROL, DP_AUX, id), \
SRI(AUX_SW_DATA, DP_AUX, id), \
SRI(AUX_SW_CONTROL, DP_AUX, id), \
SRI(AUX_INTERRUPT_CONTROL, DP_AUX, id), \
SRI(AUX_SW_STATUS, DP_AUX, id), \
SR(AUXN_IMPCAL), \
SR(AUXP_IMPCAL)
struct dce110_aux_registers {
uint32_t AUX_CONTROL;
uint32_t AUX_ARB_CONTROL;
uint32_t AUX_SW_DATA;
uint32_t AUX_SW_CONTROL;
uint32_t AUX_INTERRUPT_CONTROL;
uint32_t AUX_SW_STATUS;
uint32_t AUXN_IMPCAL;
uint32_t AUXP_IMPCAL;
uint32_t AUX_RESET_MASK;
};
enum { /* This is the timeout as defined in DP 1.2a,
* 2.3.4 "Detailed uPacket TX AUX CH State Description".
*/
AUX_TIMEOUT_PERIOD = 400,
/* Ideally, the SW timeout should be just above 550usec
* which is programmed in HW.
* But the SW timeout of 600usec is not reliable,
* because on some systems, delay_in_microseconds()
* returns faster than it should.
* EPR #379763: by trial-and-error on different systems,
* 700usec is the minimum reliable SW timeout for polling
* the AUX_SW_STATUS.AUX_SW_DONE bit.
* This timeout expires *only* when there are
* AUX Error or AUX Timeout conditions - not during normal operation.
* During normal operation, AUX_SW_STATUS.AUX_SW_DONE bit is set
* at most within ~240usec. That means,
* increasing this timeout will not affect normal operation,
* and we'll timeout after
* SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD = 1600usec.
* This timeout is especially important for
* resume from S3 and CTS.
*/
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER = 4
};
struct aux_engine_dce110 {
struct aux_engine base;
const struct dce110_aux_registers *regs;
struct {
uint32_t aux_control;
uint32_t aux_arb_control;
uint32_t aux_sw_data;
uint32_t aux_sw_control;
uint32_t aux_interrupt_control;
uint32_t aux_sw_status;
} addr;
uint32_t timeout_period;
};
struct aux_engine_dce110_init_data {
uint32_t engine_id;
uint32_t timeout_period;
struct dc_context *ctx;
const struct dce110_aux_registers *regs;
};
struct aux_engine *dce110_aux_engine_construct(
struct aux_engine_dce110 *aux_engine110,
struct dc_context *ctx,
uint32_t inst,
uint32_t timeout_period,
const struct dce110_aux_registers *regs);
void dce110_engine_destroy(struct engine **engine);
bool dce110_aux_engine_acquire(
struct engine *aux_engine,
struct ddc *ddc);
#endif
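Putting numbers on the comment above: the software budget per attempt is SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD = 4 * 400 usec = 1600 usec, against the 550 usec hardware timeout and a worst-case normal completion of roughly 240 usec. Every resource pool below feeds exactly that product into the constructor; schematically (a condensed form of the dce100/dce110/dce112/dce120/dce80/dcn10 create functions that follow):

	/* 4 * 400 usec = 1600 usec of AUX_SW_DONE polling per attempt */
	dce110_aux_engine_construct(aux_engine, ctx, inst,
			SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
			&aux_engine_regs[inst]);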

View file

@@ -337,7 +337,7 @@ static int dce112_set_clock(
static void dce_clock_read_integrated_info(struct dce_dccg *clk_dce)
{
struct dc_debug *debug = &clk_dce->base.ctx->dc->debug;
struct dc_debug_options *debug = &clk_dce->base.ctx->dc->debug;
struct dc_bios *bp = clk_dce->base.ctx->dc_bios;
struct integrated_info info = { { { 0 } } };
struct dc_firmware_info fw_info = { { 0 } };
@@ -824,7 +824,7 @@ struct dccg *dce120_dccg_create(struct dc_context *ctx)
#ifdef CONFIG_X86
struct dccg *dcn1_dccg_create(struct dc_context *ctx)
{
struct dc_debug *debug = &ctx->dc->debug;
struct dc_debug_options *debug = &ctx->dc->debug;
struct dc_bios *bp = ctx->dc_bios;
struct dc_firmware_info fw_info = { { 0 } };
struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);

View file

@@ -149,10 +149,6 @@ static uint32_t get_max_pixel_clock_for_all_paths(
max_pix_clk =
pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
}
if (max_pix_clk == 0)
ASSERT(0);
return max_pix_clk;
}

View file

@@ -52,6 +52,7 @@
#include "dce/dce_10_0_sh_mask.h"
#include "dce/dce_dmcu.h"
#include "dce/dce_aux.h"
#include "dce/dce_abm.h"
#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT
@@ -279,7 +280,20 @@ static const struct dce_opp_shift opp_shift = {
static const struct dce_opp_mask opp_mask = {
OPP_COMMON_MASK_SH_LIST_DCE_100(_MASK)
};
#define aux_engine_regs(id)\
[id] = {\
AUX_COMMON_REG_LIST(id), \
.AUX_RESET_MASK = 0 \
}
static const struct dce110_aux_registers aux_engine_regs[] = {
aux_engine_regs(0),
aux_engine_regs(1),
aux_engine_regs(2),
aux_engine_regs(3),
aux_engine_regs(4),
aux_engine_regs(5)
};
#define audio_regs(id)\
[id] = {\
@@ -572,6 +586,23 @@ struct output_pixel_processor *dce100_opp_create(
return &opp->base;
}
struct engine *dce100_aux_engine_create(
struct dc_context *ctx,
uint32_t inst)
{
struct aux_engine_dce110 *aux_engine =
kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
if (!aux_engine)
return NULL;
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
&aux_engine_regs[inst]);
return &aux_engine->base.base;
}
struct clock_source *dce100_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
@@ -624,6 +655,10 @@ static void destruct(struct dce110_resource_pool *pool)
kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
pool->base.timing_generators[i] = NULL;
}
if (pool->base.engines[i] != NULL)
dce110_engine_destroy(&pool->base.engines[i]);
}
for (i = 0; i < pool->base.stream_enc_count; i++) {
@@ -678,9 +713,22 @@ bool dce100_validate_bandwidth(
struct dc *dc,
struct dc_state *context)
{
/* TODO implement when needed but for now hardcode max value*/
context->bw.dce.dispclk_khz = 681000;
context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER;
int i;
bool at_least_one_pipe = false;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (context->res_ctx.pipe_ctx[i].stream)
at_least_one_pipe = true;
}
if (at_least_one_pipe) {
/* TODO implement when needed but for now hardcode max value*/
context->bw.dce.dispclk_khz = 681000;
context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER;
} else {
context->bw.dce.dispclk_khz = 0;
context->bw.dce.yclk_khz = 0;
}
return true;
}
@@ -915,6 +963,13 @@ static bool construct(
"DC: failed to create output pixel processor!\n");
goto res_create_fail;
}
pool->base.engines[i] = dce100_aux_engine_create(ctx, i);
if (pool->base.engines[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC:failed to create aux engine!!\n");
goto res_create_fail;
}
}
dc->caps.max_planes = pool->base.pipe_count;

View file

@@ -49,6 +49,7 @@
#include "dce/dce_clock_source.h"
#include "dce/dce_hwseq.h"
#include "dce110/dce110_hw_sequencer.h"
#include "dce/dce_aux.h"
#include "dce/dce_abm.h"
#include "dce/dce_dmcu.h"
@@ -306,6 +307,21 @@ static const struct dce_opp_mask opp_mask = {
OPP_COMMON_MASK_SH_LIST_DCE_110(_MASK)
};
#define aux_engine_regs(id)\
[id] = {\
AUX_COMMON_REG_LIST(id), \
.AUX_RESET_MASK = 0 \
}
static const struct dce110_aux_registers aux_engine_regs[] = {
aux_engine_regs(0),
aux_engine_regs(1),
aux_engine_regs(2),
aux_engine_regs(3),
aux_engine_regs(4),
aux_engine_regs(5)
};
#define audio_regs(id)\
[id] = {\
AUD_COMMON_REG_LIST(id)\
@@ -588,6 +604,23 @@ static struct output_pixel_processor *dce110_opp_create(
return &opp->base;
}
struct engine *dce110_aux_engine_create(
struct dc_context *ctx,
uint32_t inst)
{
struct aux_engine_dce110 *aux_engine =
kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
if (!aux_engine)
return NULL;
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
&aux_engine_regs[inst]);
return &aux_engine->base.base;
}
struct clock_source *dce110_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
@@ -651,6 +684,10 @@ static void destruct(struct dce110_resource_pool *pool)
kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
pool->base.timing_generators[i] = NULL;
}
if (pool->base.engines[i] != NULL)
dce110_engine_destroy(&pool->base.engines[i]);
}
for (i = 0; i < pool->base.stream_enc_count; i++) {
@@ -1258,6 +1295,14 @@ static bool construct(
"DC: failed to create output pixel processor!\n");
goto res_create_fail;
}
pool->base.engines[i] = dce110_aux_engine_create(ctx, i);
if (pool->base.engines[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC:failed to create aux engine!!\n");
goto res_create_fail;
}
}
dc->fbc_compressor = dce110_compressor_create(ctx);

View file

@@ -49,6 +49,7 @@
#include "dce112/dce112_hw_sequencer.h"
#include "dce/dce_abm.h"
#include "dce/dce_dmcu.h"
#include "dce/dce_aux.h"
#include "reg_helper.h"
@@ -314,6 +315,21 @@ static const struct dce_opp_mask opp_mask = {
OPP_COMMON_MASK_SH_LIST_DCE_112(_MASK)
};
#define aux_engine_regs(id)\
[id] = {\
AUX_COMMON_REG_LIST(id), \
.AUX_RESET_MASK = 0 \
}
static const struct dce110_aux_registers aux_engine_regs[] = {
aux_engine_regs(0),
aux_engine_regs(1),
aux_engine_regs(2),
aux_engine_regs(3),
aux_engine_regs(4),
aux_engine_regs(5)
};
#define audio_regs(id)\
[id] = {\
AUD_COMMON_REG_LIST(id)\
@@ -588,6 +604,23 @@ struct output_pixel_processor *dce112_opp_create(
return &opp->base;
}
struct engine *dce112_aux_engine_create(
struct dc_context *ctx,
uint32_t inst)
{
struct aux_engine_dce110 *aux_engine =
kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
if (!aux_engine)
return NULL;
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
&aux_engine_regs[inst]);
return &aux_engine->base.base;
}
struct clock_source *dce112_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
@@ -625,6 +658,9 @@ static void destruct(struct dce110_resource_pool *pool)
if (pool->base.opps[i] != NULL)
dce110_opp_destroy(&pool->base.opps[i]);
if (pool->base.engines[i] != NULL)
dce110_engine_destroy(&pool->base.engines[i]);
if (pool->base.transforms[i] != NULL)
dce112_transform_destroy(&pool->base.transforms[i]);
@@ -640,6 +676,10 @@ static void destruct(struct dce110_resource_pool *pool)
kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
pool->base.timing_generators[i] = NULL;
}
if (pool->base.engines[i] != NULL)
dce110_engine_destroy(&pool->base.engines[i]);
}
for (i = 0; i < pool->base.stream_enc_count; i++) {
@@ -1208,6 +1248,13 @@ static bool construct(
"DC:failed to create output pixel processor!\n");
goto res_create_fail;
}
pool->base.engines[i] = dce112_aux_engine_create(ctx, i);
if (pool->base.engines[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC:failed to create aux engine!!\n");
goto res_create_fail;
}
}
if (!resource_construct(num_virtual_links, dc, &pool->base,

View file

@@ -53,6 +53,7 @@
#include "dce/dce_hwseq.h"
#include "dce/dce_abm.h"
#include "dce/dce_dmcu.h"
#include "dce/dce_aux.h"
#include "dce/dce_12_0_offset.h"
#include "dce/dce_12_0_sh_mask.h"
@@ -297,6 +298,20 @@ static const struct dce_opp_shift opp_shift = {
static const struct dce_opp_mask opp_mask = {
OPP_COMMON_MASK_SH_LIST_DCE_120(_MASK)
};
#define aux_engine_regs(id)\
[id] = {\
AUX_COMMON_REG_LIST(id), \
.AUX_RESET_MASK = 0 \
}
static const struct dce110_aux_registers aux_engine_regs[] = {
aux_engine_regs(0),
aux_engine_regs(1),
aux_engine_regs(2),
aux_engine_regs(3),
aux_engine_regs(4),
aux_engine_regs(5)
};
#define audio_regs(id)\
[id] = {\
@@ -361,6 +376,22 @@ struct output_pixel_processor *dce120_opp_create(
ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask);
return &opp->base;
}
struct engine *dce120_aux_engine_create(
struct dc_context *ctx,
uint32_t inst)
{
struct aux_engine_dce110 *aux_engine =
kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
if (!aux_engine)
return NULL;
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
&aux_engine_regs[inst]);
return &aux_engine->base.base;
}
static const struct bios_registers bios_regs = {
.BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 + NBIO_BASE(mmBIOS_SCRATCH_6_BASE_IDX)
@@ -373,7 +404,7 @@ static const struct resource_caps res_cap = {
.num_pll = 6,
};
static const struct dc_debug debug_defaults = {
static const struct dc_debug_options debug_defaults = {
.disable_clock_gate = true,
};
@@ -467,6 +498,10 @@ static void destruct(struct dce110_resource_pool *pool)
kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
pool->base.timing_generators[i] = NULL;
}
if (pool->base.engines[i] != NULL)
dce110_engine_destroy(&pool->base.engines[i]);
}
for (i = 0; i < pool->base.audio_count; i++) {
@@ -984,6 +1019,13 @@ static bool construct(
dm_error(
"DC: failed to create output pixel processor!\n");
}
pool->base.engines[i] = dce120_aux_engine_create(ctx, i);
if (pool->base.engines[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC:failed to create aux engine!!\n");
goto res_create_fail;
}
/* check next valid pipe */
j++;

View file

@@ -54,6 +54,7 @@
#include "reg_helper.h"
#include "dce/dce_dmcu.h"
#include "dce/dce_aux.h"
#include "dce/dce_abm.h"
/* TODO remove this include */
@@ -298,6 +299,21 @@ static const struct dce_opp_mask opp_mask = {
OPP_COMMON_MASK_SH_LIST_DCE_80(_MASK)
};
#define aux_engine_regs(id)\
[id] = {\
AUX_COMMON_REG_LIST(id), \
.AUX_RESET_MASK = 0 \
}
static const struct dce110_aux_registers aux_engine_regs[] = {
aux_engine_regs(0),
aux_engine_regs(1),
aux_engine_regs(2),
aux_engine_regs(3),
aux_engine_regs(4),
aux_engine_regs(5)
};
#define audio_regs(id)\
[id] = {\
AUD_COMMON_REG_LIST(id)\
@@ -448,6 +464,23 @@ static struct output_pixel_processor *dce80_opp_create(
return &opp->base;
}
struct engine *dce80_aux_engine_create(
struct dc_context *ctx,
uint32_t inst)
{
struct aux_engine_dce110 *aux_engine =
kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
if (!aux_engine)
return NULL;
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
&aux_engine_regs[inst]);
return &aux_engine->base.base;
}
static struct stream_encoder *dce80_stream_encoder_create(
enum engine_id eng_id,
struct dc_context *ctx)
@@ -655,6 +688,9 @@ static void destruct(struct dce110_resource_pool *pool)
kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
pool->base.timing_generators[i] = NULL;
}
if (pool->base.engines[i] != NULL)
dce110_engine_destroy(&pool->base.engines[i]);
}
for (i = 0; i < pool->base.stream_enc_count; i++) {
@@ -899,6 +935,14 @@ static bool dce80_construct(
dm_error("DC: failed to create output pixel processor!\n");
goto res_create_fail;
}
pool->base.engines[i] = dce80_aux_engine_create(ctx, i);
if (pool->base.engines[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC:failed to create aux engine!!\n");
goto res_create_fail;
}
}
dc->caps.max_planes = pool->base.pipe_count;

View file

@@ -294,6 +294,10 @@ void hubp1_program_pixel_format(
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 66);
break;
case SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888:
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 12);
break;
default:
BREAK_TO_DEBUGGER();
break;

View file

@@ -64,6 +64,7 @@
#include "reg_helper.h"
#include "dce/dce_abm.h"
#include "dce/dce_dmcu.h"
#include "dce/dce_aux.h"
const struct _vcs_dpi_ip_params_st dcn1_0_ip = {
.rob_buffer_size_kbytes = 64,
@@ -356,6 +357,21 @@ static const struct dcn10_opp_mask opp_mask = {
OPP_MASK_SH_LIST_DCN10(_MASK),
};
#define aux_engine_regs(id)\
[id] = {\
AUX_COMMON_REG_LIST(id), \
.AUX_RESET_MASK = 0 \
}
static const struct dce110_aux_registers aux_engine_regs[] = {
aux_engine_regs(0),
aux_engine_regs(1),
aux_engine_regs(2),
aux_engine_regs(3),
aux_engine_regs(4),
aux_engine_regs(5)
};
#define tf_regs(id)\
[id] = {\
TF_REG_LIST_DCN10(id),\
@@ -486,7 +502,7 @@ static const struct resource_caps res_cap = {
.num_pll = 4,
};
static const struct dc_debug debug_defaults_drv = {
static const struct dc_debug_options debug_defaults_drv = {
.sanity_checks = true,
.disable_dmcu = true,
.force_abm_enable = false,
@@ -514,7 +530,7 @@ static const struct dc_debug debug_defaults_drv = {
.max_downscale_src_width = 3840,
};
static const struct dc_debug debug_defaults_diags = {
static const struct dc_debug_options debug_defaults_diags = {
.disable_dmcu = true,
.force_abm_enable = false,
.timing_trace = true,
@@ -578,6 +594,23 @@ static struct output_pixel_processor *dcn10_opp_create(
return &opp->base;
}
struct engine *dcn10_aux_engine_create(
struct dc_context *ctx,
uint32_t inst)
{
struct aux_engine_dce110 *aux_engine =
kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
if (!aux_engine)
return NULL;
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
&aux_engine_regs[inst]);
return &aux_engine->base.base;
}
static struct mpc *dcn10_mpc_create(struct dc_context *ctx)
{
struct dcn10_mpc *mpc10 = kzalloc(sizeof(struct dcn10_mpc),
@@ -826,6 +859,9 @@ static void destruct(struct dcn10_resource_pool *pool)
kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i]));
pool->base.timing_generators[i] = NULL;
}
if (pool->base.engines[i] != NULL)
pool->base.engines[i]->funcs->destroy_engine(&pool->base.engines[i]);
}
for (i = 0; i < pool->base.stream_enc_count; i++)
@@ -1255,6 +1291,14 @@ static bool construct(
goto fail;
}
pool->base.engines[i] = dcn10_aux_engine_create(ctx, i);
if (pool->base.engines[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC:failed to create aux engine!!\n");
goto fail;
}
/* check next valid pipe */
j++;
}

View file

@@ -96,6 +96,7 @@ struct engine_funcs {
struct engine {
const struct engine_funcs *funcs;
uint32_t inst;
struct ddc *ddc;
struct dc_context *ctx;
};

View file

@@ -138,7 +138,7 @@ struct resource_pool {
struct output_pixel_processor *opps[MAX_PIPES];
struct timing_generator *timing_generators[MAX_PIPES];
struct stream_encoder *stream_enc[MAX_PIPES * 2];
struct engine *engines[MAX_PIPES];
struct hubbub *hubbub;
struct mpc *mpc;
struct pp_smu_funcs_rv *pp_smu;

View file

@@ -33,7 +33,7 @@ struct dc_link;
struct dc_stream_state;
struct dc_link_settings;
bool dp_hbr_verify_link_cap(
bool dp_verify_link_cap(
struct dc_link *link,
struct dc_link_settings *known_limit_link_setting);

View file

@@ -0,0 +1,113 @@
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef __DAL_AUX_ENGINE_H__
#define __DAL_AUX_ENGINE_H__
#include "engine.h"
#include "include/i2caux_interface.h"
struct aux_engine;
union aux_config;
struct aux_engine_funcs {
void (*destroy)(
struct aux_engine **ptr);
bool (*acquire_engine)(
struct aux_engine *engine);
void (*configure)(
struct aux_engine *engine,
union aux_config cfg);
void (*submit_channel_request)(
struct aux_engine *engine,
struct aux_request_transaction_data *request);
void (*process_channel_reply)(
struct aux_engine *engine,
struct aux_reply_transaction_data *reply);
int (*read_channel_reply)(
struct aux_engine *engine,
uint32_t size,
uint8_t *buffer,
uint8_t *reply_result,
uint32_t *sw_status);
enum aux_channel_operation_result (*get_channel_status)(
struct aux_engine *engine,
uint8_t *returned_bytes);
bool (*is_engine_available)(struct aux_engine *engine);
};
struct engine;
struct aux_engine {
struct engine base;
const struct aux_engine_funcs *funcs;
/* following values are expressed in milliseconds */
uint32_t delay;
uint32_t max_defer_write_retry;
bool acquire_reset;
};
struct read_command_context {
uint8_t *buffer;
uint32_t current_read_length;
uint32_t offset;
enum i2caux_transaction_status status;
struct aux_request_transaction_data request;
struct aux_reply_transaction_data reply;
uint8_t returned_byte;
uint32_t timed_out_retry_aux;
uint32_t invalid_reply_retry_aux;
uint32_t defer_retry_aux;
uint32_t defer_retry_i2c;
uint32_t invalid_reply_retry_aux_on_ack;
bool transaction_complete;
bool operation_succeeded;
};
struct write_command_context {
bool mot;
uint8_t *buffer;
uint32_t current_write_length;
enum i2caux_transaction_status status;
struct aux_request_transaction_data request;
struct aux_reply_transaction_data reply;
uint8_t returned_byte;
uint32_t timed_out_retry_aux;
uint32_t invalid_reply_retry_aux;
uint32_t defer_retry_aux;
uint32_t defer_retry_i2c;
uint32_t max_defer_retry;
uint32_t ack_m_retry;
uint8_t reply_data[DEFAULT_AUX_MAX_DATA_SIZE];
bool transaction_complete;
bool operation_succeeded;
};
#endif

View file

@@ -0,0 +1,106 @@
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef __DAL_ENGINE_H__
#define __DAL_ENGINE_H__
#include "dc_ddc_types.h"
enum i2caux_transaction_operation {
I2CAUX_TRANSACTION_READ,
I2CAUX_TRANSACTION_WRITE
};
enum i2caux_transaction_address_space {
I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C = 1,
I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD
};
struct i2caux_transaction_payload {
enum i2caux_transaction_address_space address_space;
uint32_t address;
uint32_t length;
uint8_t *data;
};
enum i2caux_transaction_status {
I2CAUX_TRANSACTION_STATUS_UNKNOWN = (-1L),
I2CAUX_TRANSACTION_STATUS_SUCCEEDED,
I2CAUX_TRANSACTION_STATUS_FAILED_CHANNEL_BUSY,
I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT,
I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR,
I2CAUX_TRANSACTION_STATUS_FAILED_NACK,
I2CAUX_TRANSACTION_STATUS_FAILED_INCOMPLETE,
I2CAUX_TRANSACTION_STATUS_FAILED_OPERATION,
I2CAUX_TRANSACTION_STATUS_FAILED_INVALID_OPERATION,
I2CAUX_TRANSACTION_STATUS_FAILED_BUFFER_OVERFLOW,
I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON
};
struct i2caux_transaction_request {
enum i2caux_transaction_operation operation;
struct i2caux_transaction_payload payload;
enum i2caux_transaction_status status;
};
enum i2caux_engine_type {
I2CAUX_ENGINE_TYPE_UNKNOWN = (-1L),
I2CAUX_ENGINE_TYPE_AUX,
I2CAUX_ENGINE_TYPE_I2C_DDC_HW,
I2CAUX_ENGINE_TYPE_I2C_GENERIC_HW,
I2CAUX_ENGINE_TYPE_I2C_SW
};
enum i2c_default_speed {
I2CAUX_DEFAULT_I2C_HW_SPEED = 50,
I2CAUX_DEFAULT_I2C_SW_SPEED = 50
};
struct engine;
struct engine_funcs {
enum i2caux_engine_type (*get_engine_type)(
const struct engine *engine);
struct aux_engine* (*acquire)(
struct engine *engine,
struct ddc *ddc);
bool (*submit_request)(
struct engine *engine,
struct i2caux_transaction_request *request,
bool middle_of_transaction);
void (*release_engine)(
struct engine *engine);
void (*destroy_engine)(
struct engine **engine);
};
struct engine {
const struct engine_funcs *funcs;
uint32_t inst;
struct ddc *ddc;
struct dc_context *ctx;
};
#endif
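Taken together, engine_funcs is the contract the upper DC layers program against: acquire() opens the DDC line in AUX mode and claims the hardware engine, submit_request() runs one transaction (including the MOT/stop handling shown earlier), and release_engine() gives the engine back. A minimal sketch of that sequence for a single native-AUX (DPCD) read; the helper name is hypothetical and error handling is reduced to the bare minimum:

static bool dpcd_read_example(struct engine *engine, struct ddc *ddc,
		uint32_t dpcd_address, uint8_t *data, uint32_t length)
{
	struct i2caux_transaction_request request = {
		.operation = I2CAUX_TRANSACTION_READ,
		.payload = {
			.address_space = I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD,
			.address = dpcd_address,
			.length = length,
			.data = data,
		},
	};
	bool ok;

	/* open the DDC line in AUX mode and grab the HW engine */
	if (!engine->funcs->acquire(engine, ddc))
		return false;

	/* single transaction, so middle_of_transaction = false */
	ok = engine->funcs->submit_request(engine, &request, false);

	engine->funcs->release_engine(engine);

	return ok;
}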

View file

@@ -998,7 +998,7 @@ static int pp_get_display_power_level(void *handle,
static int pp_get_current_clocks(void *handle,
struct amd_pp_clock_info *clocks)
{
struct amd_pp_simple_clock_info simple_clocks;
struct amd_pp_simple_clock_info simple_clocks = { 0 };
struct pp_clock_info hw_clocks;
struct pp_hwmgr *hwmgr = handle;
int ret = 0;
@@ -1034,7 +1034,10 @@ static int pp_get_current_clocks(void *handle,
clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
clocks->max_clocks_state = simple_clocks.level;
if (simple_clocks.level == 0)
clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
else
clocks->max_clocks_state = simple_clocks.level;
if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks)) {
clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
@@ -1137,6 +1140,8 @@ static int pp_get_display_mode_validation_clocks(void *handle,
if (!hwmgr || !hwmgr->pm_en ||!clocks)
return -EINVAL;
clocks->level = PP_DAL_POWERLEVEL_7;
mutex_lock(&hwmgr->smu_lock);
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))

View file

@@ -289,7 +289,15 @@ static int vega10_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table;
struct phm_ppt_v1_clock_voltage_dependency_table *dep_table[3];
struct phm_ppt_v1_clock_voltage_dependency_table *od_table[3];
struct pp_atomfwctrl_avfs_parameters avfs_params = {0};
uint32_t i;
int result;
result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params);
if (!result) {
data->odn_dpm_table.max_vddc = avfs_params.ulMaxVddc;
data->odn_dpm_table.min_vddc = avfs_params.ulMinVddc;
}
od_lookup_table = &odn_table->vddc_lookup_table;
vddc_lookup_table = table_info->vddc_lookup_table;
@@ -2072,9 +2080,6 @@ static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
if (data->smu_features[GNLD_AVFS].supported) {
result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params);
if (!result) {
data->odn_dpm_table.max_vddc = avfs_params.ulMaxVddc;
data->odn_dpm_table.min_vddc = avfs_params.ulMinVddc;
pp_table->MinVoltageVid = (uint8_t)
convert_to_vid((uint16_t)(avfs_params.ulMinVddc));
pp_table->MaxVoltageVid = (uint8_t)
@@ -3254,10 +3259,25 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
{
int result = 0;
struct vega10_hwmgr *data = hwmgr->backend;
struct vega10_dpm_table *dpm_table = &data->dpm_table;
struct vega10_odn_dpm_table *odn_table = &data->odn_dpm_table;
struct vega10_odn_clock_voltage_dependency_table *odn_clk_table = &odn_table->vdd_dep_on_sclk;
int count;
if (!data->need_update_dpm_table)
return 0;
if (hwmgr->od_enabled && data->need_update_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
for (count = 0; count < dpm_table->gfx_table.count; count++)
dpm_table->gfx_table.dpm_levels[count].value = odn_clk_table->entries[count].clk;
}
odn_clk_table = &odn_table->vdd_dep_on_mclk;
if (hwmgr->od_enabled && data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
for (count = 0; count < dpm_table->mem_table.count; count++)
dpm_table->mem_table.dpm_levels[count].value = odn_clk_table->entries[count].clk;
}
if (data->need_update_dpm_table &
(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK + DPMTABLE_UPDATE_SOCCLK)) {
result = vega10_populate_all_graphic_levels(hwmgr);
@@ -3705,7 +3725,7 @@ static void vega10_notify_smc_display_change(struct pp_hwmgr *hwmgr,
{
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetUclkFastSwitch,
has_disp ? 0 : 1);
has_disp ? 1 : 0);
}
int vega10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
@@ -3780,7 +3800,9 @@ static int vega10_notify_smc_display_config_after_ps_adjustment(
uint32_t i;
struct pp_display_clock_request clock_req;
if (hwmgr->display_config->num_display > 1)
if ((hwmgr->display_config->num_display > 1) &&
!hwmgr->display_config->multi_monitor_in_sync &&
!hwmgr->display_config->nb_pstate_switch_disable)
vega10_notify_smc_display_change(hwmgr, false);
else
vega10_notify_smc_display_change(hwmgr, true);

View file

@@ -1334,7 +1334,7 @@ static int vega12_notify_smc_display_change(struct pp_hwmgr *hwmgr,
if (data->smu_features[GNLD_DPM_UCLK].enabled)
return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetUclkFastSwitch,
has_disp ? 0 : 1);
has_disp ? 1 : 0);
return 0;
}
@@ -1389,7 +1389,8 @@ static int vega12_notify_smc_display_config_after_ps_adjustment(
struct pp_display_clock_request clock_req;
if ((hwmgr->display_config->num_display > 1) &&
!hwmgr->display_config->multi_monitor_in_sync)
!hwmgr->display_config->multi_monitor_in_sync &&
!hwmgr->display_config->nb_pstate_switch_disable)
vega12_notify_smc_display_change(hwmgr, false);
else
vega12_notify_smc_display_change(hwmgr, true);

View file

@@ -78,8 +78,7 @@ static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
gpu->lastctx = NULL;
mutex_unlock(&gpu->lock);
drm_sched_entity_destroy(&gpu->sched,
&ctx->sched_entity[i]);
drm_sched_entity_destroy(&ctx->sched_entity[i]);
}
}

View file

@@ -118,8 +118,8 @@ int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
{
int ret;
ret = drm_sched_job_init(&submit->sched_job, &submit->gpu->sched,
sched_entity, submit->cmdbuf.ctx);
ret = drm_sched_job_init(&submit->sched_job, sched_entity,
submit->cmdbuf.ctx);
if (ret)
return ret;

View file

@@ -185,7 +185,6 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
memset(entity, 0, sizeof(struct drm_sched_entity));
INIT_LIST_HEAD(&entity->list);
entity->rq = rq_list[0];
entity->sched = rq_list[0]->sched;
entity->guilty = guilty;
entity->last_scheduled = NULL;
@@ -210,8 +209,8 @@ EXPORT_SYMBOL(drm_sched_entity_init);
static bool drm_sched_entity_is_initialized(struct drm_gpu_scheduler *sched,
struct drm_sched_entity *entity)
{
return entity->sched == sched &&
entity->rq != NULL;
return entity->rq != NULL &&
entity->rq->sched == sched;
}
/**
@@ -273,11 +272,12 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
*
* Returns the remaining time in jiffies left from the input timeout
*/
long drm_sched_entity_flush(struct drm_gpu_scheduler *sched,
struct drm_sched_entity *entity, long timeout)
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
{
struct drm_gpu_scheduler *sched;
long ret = timeout;
sched = entity->rq->sched;
if (!drm_sched_entity_is_initialized(sched, entity))
return ret;
/**
@@ -312,10 +312,11 @@ EXPORT_SYMBOL(drm_sched_entity_flush);
* entity and signals all jobs with an error code if the process was killed.
*
*/
void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
struct drm_sched_entity *entity)
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
struct drm_gpu_scheduler *sched;
sched = entity->rq->sched;
drm_sched_entity_set_rq(entity, NULL);
/* Consumption of existing IBs wasn't completed. Forcefully
@@ -373,11 +374,10 @@ EXPORT_SYMBOL(drm_sched_entity_fini);
*
* Calls drm_sched_entity_do_release() and drm_sched_entity_cleanup()
*/
void drm_sched_entity_destroy(struct drm_gpu_scheduler *sched,
struct drm_sched_entity *entity)
void drm_sched_entity_destroy(struct drm_sched_entity *entity)
{
drm_sched_entity_flush(sched, entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
drm_sched_entity_fini(sched, entity);
drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
drm_sched_entity_fini(entity);
}
EXPORT_SYMBOL(drm_sched_entity_destroy);
@@ -387,7 +387,7 @@ static void drm_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb
container_of(cb, struct drm_sched_entity, cb);
entity->dependency = NULL;
dma_fence_put(f);
drm_sched_wakeup(entity->sched);
drm_sched_wakeup(entity->rq->sched);
}
static void drm_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
@@ -437,7 +437,7 @@ EXPORT_SYMBOL(drm_sched_entity_set_rq);
bool drm_sched_dependency_optimized(struct dma_fence* fence,
struct drm_sched_entity *entity)
{
struct drm_gpu_scheduler *sched = entity->sched;
struct drm_gpu_scheduler *sched = entity->rq->sched;
struct drm_sched_fence *s_fence;
if (!fence || dma_fence_is_signaled(fence))
@@ -454,7 +454,7 @@ EXPORT_SYMBOL(drm_sched_dependency_optimized);
static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
struct drm_gpu_scheduler *sched = entity->sched;
struct drm_gpu_scheduler *sched = entity->rq->sched;
struct dma_fence * fence = entity->dependency;
struct drm_sched_fence *s_fence;
@@ -499,7 +499,7 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
static struct drm_sched_job *
drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
struct drm_gpu_scheduler *sched = entity->sched;
struct drm_gpu_scheduler *sched = entity->rq->sched;
struct drm_sched_job *sched_job = to_drm_sched_job(
spsc_queue_peek(&entity->job_queue));
@@ -740,10 +740,11 @@ EXPORT_SYMBOL(drm_sched_job_recovery);
* Returns 0 for success, negative error code otherwise.
*/
int drm_sched_job_init(struct drm_sched_job *job,
struct drm_gpu_scheduler *sched,
struct drm_sched_entity *entity,
void *owner)
{
struct drm_gpu_scheduler *sched = entity->rq->sched;
job->sched = sched;
job->entity = entity;
job->s_priority = entity->rq - sched->sched_rq;
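The driver-facing effect of this cleanup is what the etnaviv and v3d hunks above show: the explicit struct drm_gpu_scheduler argument disappears from the entity and job entry points and is recovered internally from entity->rq->sched. A hedged sketch of the resulting call sites in a hypothetical foo_ driver (not taken from this series):

#include <drm/gpu_scheduler.h>

struct foo_file_priv {
	struct drm_sched_entity entity;
};

static int foo_entity_init(struct drm_gpu_scheduler *sched,
			   struct foo_file_priv *priv)
{
	struct drm_sched_rq *rq = &sched->sched_rq[DRM_SCHED_PRIORITY_NORMAL];

	/* the scheduler is implied by the runqueue, not passed separately */
	return drm_sched_entity_init(&priv->entity, &rq, 1, NULL);
}

static int foo_push_job(struct foo_file_priv *priv,
			struct drm_sched_job *job, void *owner)
{
	/* no struct drm_gpu_scheduler * argument any more */
	int ret = drm_sched_job_init(job, &priv->entity, owner);

	if (ret)
		return ret;

	drm_sched_entity_push_job(job, &priv->entity);
	return 0;
}

static void foo_entity_fini(struct foo_file_priv *priv)
{
	/* flushes outstanding jobs, then tears the entity down */
	drm_sched_entity_destroy(&priv->entity);
}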

View file

@@ -161,7 +161,7 @@ struct drm_sched_fence *drm_sched_fence_create(struct drm_sched_entity *entity,
return NULL;
fence->owner = owner;
fence->sched = entity->sched;
fence->sched = entity->rq->sched;
spin_lock_init(&fence->lock);
seq = atomic_inc_return(&entity->fence_seq);

View file

@@ -145,13 +145,11 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
static void
v3d_postclose(struct drm_device *dev, struct drm_file *file)
{
struct v3d_dev *v3d = to_v3d_dev(dev);
struct v3d_file_priv *v3d_priv = file->driver_priv;
enum v3d_queue q;
for (q = 0; q < V3D_MAX_QUEUES; q++) {
drm_sched_entity_destroy(&v3d->queue[q].sched,
&v3d_priv->sched_entity[q]);
drm_sched_entity_destroy(&v3d_priv->sched_entity[q]);
}
kfree(v3d_priv);

View file

@@ -553,7 +553,6 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
mutex_lock(&v3d->sched_lock);
if (exec->bin.start != exec->bin.end) {
ret = drm_sched_job_init(&exec->bin.base,
&v3d->queue[V3D_BIN].sched,
&v3d_priv->sched_entity[V3D_BIN],
v3d_priv);
if (ret)
@@ -568,7 +567,6 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
}
ret = drm_sched_job_init(&exec->render.base,
&v3d->queue[V3D_RENDER].sched,
&v3d_priv->sched_entity[V3D_RENDER],
v3d_priv);
if (ret)

View file

@@ -52,7 +52,6 @@ enum drm_sched_priority {
* runqueue.
* @rq: runqueue to which this entity belongs.
* @rq_lock: lock to modify the runqueue to which this entity belongs.
* @sched: the scheduler instance to which this entity is enqueued.
* @job_queue: the list of jobs of this entity.
* @fence_seq: a linearly increasing seqno incremented with each
* new &drm_sched_fence which is part of the entity.
@@ -76,7 +75,6 @@ struct drm_sched_entity {
struct list_head list;
struct drm_sched_rq *rq;
spinlock_t rq_lock;
struct drm_gpu_scheduler *sched;
struct spsc_queue job_queue;
@@ -286,12 +284,9 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
struct drm_sched_rq **rq_list,
unsigned int num_rq_list,
atomic_t *guilty);
long drm_sched_entity_flush(struct drm_gpu_scheduler *sched,
struct drm_sched_entity *entity, long timeout);
void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
struct drm_sched_entity *entity);
void drm_sched_entity_destroy(struct drm_gpu_scheduler *sched,
struct drm_sched_entity *entity);
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
void drm_sched_entity_fini(struct drm_sched_entity *entity);
void drm_sched_entity_destroy(struct drm_sched_entity *entity);
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
struct drm_sched_entity *entity);
void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
@@ -302,7 +297,6 @@ struct drm_sched_fence *drm_sched_fence_create(
void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
void drm_sched_fence_finished(struct drm_sched_fence *fence);
int drm_sched_job_init(struct drm_sched_job *job,
struct drm_gpu_scheduler *sched,
struct drm_sched_entity *entity,
void *owner);
void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched,