Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git (synced 2024-10-29 23:53:32 +00:00)
Merge branch 'drm-next-4.19' of git://people.freedesktop.org/~agd5f/linux into drm-next
More features for 4.19:
- Map processes to vmids for debugging GPUVM faults
- Raven gfxoff fixes
- Initial gfxoff support for vega12
- Use defines for interrupt sources rather than magic numbers
- DC aux fixes
- Finish DC logging TODO
- Add more DC debugfs interfaces for conformance testing
- Add CRC support for DCN
- Scheduler rework in preparation for load balancing
- Unify common smu9 code
- Clean up UVD instancing support
- ttm cleanups
- Misc fixes and cleanups

Signed-off-by: Dave Airlie <airlied@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180719194001.3488-1-alexander.deucher@amd.com
This commit is contained in:
commit 500775074f
189 changed files with 3673 additions and 2295 deletions
drivers/gpu/drm/amd/amdgpu/amdgpu.h

@@ -73,6 +73,7 @@
 #include "amdgpu_virt.h"
 #include "amdgpu_gart.h"
 #include "amdgpu_debugfs.h"
+#include "amdgpu_job.h"

 /*
  * Modules parameters.
@@ -105,11 +106,8 @@ extern int amdgpu_vm_fault_stop;
 extern int amdgpu_vm_debug;
 extern int amdgpu_vm_update_mode;
 extern int amdgpu_dc;
-extern int amdgpu_dc_log;
 extern int amdgpu_sched_jobs;
 extern int amdgpu_sched_hw_submission;
-extern int amdgpu_no_evict;
-extern int amdgpu_direct_gma_size;
 extern uint amdgpu_pcie_gen_cap;
 extern uint amdgpu_pcie_lane_cap;
 extern uint amdgpu_cg_mask;
@@ -600,17 +598,6 @@ struct amdgpu_ib {

 extern const struct drm_sched_backend_ops amdgpu_sched_ops;

-int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
-                     struct amdgpu_job **job, struct amdgpu_vm *vm);
-int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
-                             struct amdgpu_job **job);
-
-void amdgpu_job_free_resources(struct amdgpu_job *job);
-void amdgpu_job_free(struct amdgpu_job *job);
-int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
-                      struct drm_sched_entity *entity, void *owner,
-                      struct dma_fence **f);
-
 /*
  * Queue manager
  */
@@ -732,6 +719,14 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
                              struct list_head *validated);
 void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
 void amdgpu_bo_list_free(struct amdgpu_bo_list *list);
+int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
+                                      struct drm_amdgpu_bo_list_entry **info_param);
+
+int amdgpu_bo_list_create(struct amdgpu_device *adev,
+                          struct drm_file *filp,
+                          struct drm_amdgpu_bo_list_entry *info,
+                          unsigned num_entries,
+                          struct amdgpu_bo_list **list);

 /*
  * GFX stuff
@@ -1029,6 +1024,7 @@ struct amdgpu_cs_parser {

     /* scheduler job object */
     struct amdgpu_job       *job;
+    struct amdgpu_ring      *ring;

     /* buffer objects */
     struct ww_acquire_ctx   ticket;
@@ -1050,40 +1046,6 @@ struct amdgpu_cs_parser {
     struct drm_syncobj **post_dep_syncobjs;
 };

-#define AMDGPU_PREAMBLE_IB_PRESENT          (1 << 0) /* bit set means command submit involves a preamble IB */
-#define AMDGPU_PREAMBLE_IB_PRESENT_FIRST    (1 << 1) /* bit set means preamble IB is first presented in belonging context */
-#define AMDGPU_HAVE_CTX_SWITCH              (1 << 2) /* bit set means context switch occured */
-
-struct amdgpu_job {
-    struct drm_sched_job    base;
-    struct amdgpu_device    *adev;
-    struct amdgpu_vm        *vm;
-    struct amdgpu_ring      *ring;
-    struct amdgpu_sync      sync;
-    struct amdgpu_sync      sched_sync;
-    struct amdgpu_ib        *ibs;
-    struct dma_fence        *fence; /* the hw fence */
-    uint32_t                preamble_status;
-    uint32_t                num_ibs;
-    void                    *owner;
-    uint64_t                fence_ctx; /* the fence_context this job uses */
-    bool                    vm_needs_flush;
-    uint64_t                vm_pd_addr;
-    unsigned                vmid;
-    unsigned                pasid;
-    uint32_t                gds_base, gds_size;
-    uint32_t                gws_base, gws_size;
-    uint32_t                oa_base, oa_size;
-    uint32_t                vram_lost_counter;
-
-    /* user fence handling */
-    uint64_t                uf_addr;
-    uint64_t                uf_sequence;
-
-};
-#define to_amdgpu_job(sched_job) \
-        container_of((sched_job), struct amdgpu_job, base)

 static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p,
                                       uint32_t ib_idx, int idx)
 {
@@ -1398,6 +1360,7 @@ enum amd_hw_ip_block_type {
     PWR_HWIP,
     NBIF_HWIP,
     THM_HWIP,
+    CLK_HWIP,
     MAX_HWIP
 };

@@ -1588,9 +1551,9 @@ struct amdgpu_device {
     DECLARE_HASHTABLE(mn_hash, 7);

     /* tracking pinned memory */
-    u64 vram_pin_size;
-    u64 invisible_pin_size;
-    u64 gart_pin_size;
+    atomic64_t vram_pin_size;
+    atomic64_t visible_pin_size;
+    atomic64_t gart_pin_size;

     /* amdkfd interface */
     struct kfd_dev          *kfd;
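The last hunk above converts the pin-size counters from plain u64 to atomic64_t. Pin and unpin paths can run concurrently from several threads, and the atomic type lets each path adjust the accounting without a shared lock; readers take a snapshot with atomic64_read(), as the amdgpu_cs.c and amdgpu_kms.c hunks further down show. A minimal illustrative sketch of the pattern (placeholder names, not driver code):

    #include <linux/atomic.h>

    static atomic64_t pin_size = ATOMIC64_INIT(0);

    static void account_pin(u64 size)
    {
        atomic64_add(size, &pin_size);      /* pin path */
    }

    static void account_unpin(u64 size)
    {
        atomic64_sub(size, &pin_size);      /* unpin path */
    }

    static u64 pinned_now(void)
    {
        return atomic64_read(&pin_size);    /* lock-free snapshot */
    }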
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c

@@ -251,7 +251,6 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
     struct amdgpu_bo *bo = NULL;
     struct amdgpu_bo_param bp;
     int r;
-    uint64_t gpu_addr_tmp = 0;
     void *cpu_ptr_tmp = NULL;

     memset(&bp, 0, sizeof(bp));
@@ -275,13 +274,18 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
         goto allocate_mem_reserve_bo_failed;
     }

-    r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT,
-                      &gpu_addr_tmp);
+    r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
     if (r) {
         dev_err(adev->dev, "(%d) failed to pin bo for amdkfd\n", r);
         goto allocate_mem_pin_bo_failed;
     }

+    r = amdgpu_ttm_alloc_gart(&bo->tbo);
+    if (r) {
+        dev_err(adev->dev, "%p bind failed\n", bo);
+        goto allocate_mem_kmap_bo_failed;
+    }
+
     r = amdgpu_bo_kmap(bo, &cpu_ptr_tmp);
     if (r) {
         dev_err(adev->dev,
@@ -290,7 +294,7 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
     }

     *mem_obj = bo;
-    *gpu_addr = gpu_addr_tmp;
+    *gpu_addr = amdgpu_bo_gpu_offset(bo);
     *cpu_ptr = cpu_ptr_tmp;

     amdgpu_bo_unreserve(bo);
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c

@@ -1587,7 +1587,7 @@ int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
         goto bo_reserve_failed;
     }

-    ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
+    ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
     if (ret) {
         pr_err("Failed to pin bo. ret %d\n", ret);
         goto pin_failed;
drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c

@@ -95,11 +95,17 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
     r = amdgpu_bo_reserve(sobj, false);
     if (unlikely(r != 0))
         goto out_cleanup;
-    r = amdgpu_bo_pin(sobj, sdomain, &saddr);
+    r = amdgpu_bo_pin(sobj, sdomain);
+    if (r) {
+        amdgpu_bo_unreserve(sobj);
+        goto out_cleanup;
+    }
+    r = amdgpu_ttm_alloc_gart(&sobj->tbo);
     amdgpu_bo_unreserve(sobj);
     if (r) {
         goto out_cleanup;
     }
+    saddr = amdgpu_bo_gpu_offset(sobj);
     bp.domain = ddomain;
     r = amdgpu_bo_create(adev, &bp, &dobj);
     if (r) {
@@ -108,11 +114,17 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
     r = amdgpu_bo_reserve(dobj, false);
     if (unlikely(r != 0))
         goto out_cleanup;
-    r = amdgpu_bo_pin(dobj, ddomain, &daddr);
+    r = amdgpu_bo_pin(dobj, ddomain);
+    if (r) {
+        amdgpu_bo_unreserve(sobj);
+        goto out_cleanup;
+    }
+    r = amdgpu_ttm_alloc_gart(&dobj->tbo);
     amdgpu_bo_unreserve(dobj);
     if (r) {
         goto out_cleanup;
     }
+    daddr = amdgpu_bo_gpu_offset(dobj);

     if (adev->mman.buffer_funcs) {
         time = amdgpu_benchmark_do_move(adev, size, saddr, daddr, n);
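The benchmark hunks show the caller-side sequence this series introduces throughout the driver: amdgpu_bo_pin() no longer hands back a GPU address, so a caller that needs one pins first, binds the pages with amdgpu_ttm_alloc_gart(), and only then queries amdgpu_bo_gpu_offset(). A hedged sketch of that sequence as a helper (the function and its error labels are illustrative, not part of the driver):

    static int example_pin_and_map(struct amdgpu_bo *bo, uint64_t *addr)
    {
        int r;

        r = amdgpu_bo_reserve(bo, false);
        if (r)
            return r;

        r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT); /* no address out-param anymore */
        if (r)
            goto out_unreserve;

        r = amdgpu_ttm_alloc_gart(&bo->tbo);          /* bind the pages into the GART */
        if (r)
            goto out_unpin;

        *addr = amdgpu_bo_gpu_offset(bo);             /* offset is only valid once bound */
        amdgpu_bo_unreserve(bo);
        return 0;

    out_unpin:
        amdgpu_bo_unpin(bo);
    out_unreserve:
        amdgpu_bo_unreserve(bo);
        return r;
    }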
drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c

@@ -55,15 +55,15 @@ static void amdgpu_bo_list_release_rcu(struct kref *ref)
     kfree_rcu(list, rhead);
 }

-static int amdgpu_bo_list_create(struct amdgpu_device *adev,
+int amdgpu_bo_list_create(struct amdgpu_device *adev,
                  struct drm_file *filp,
                  struct drm_amdgpu_bo_list_entry *info,
                  unsigned num_entries,
-                 int *id)
+                 struct amdgpu_bo_list **list_out)
 {
-    int r;
     struct amdgpu_fpriv *fpriv = filp->driver_priv;
     struct amdgpu_bo_list *list;
+    int r;

     list = kzalloc(sizeof(struct amdgpu_bo_list), GFP_KERNEL);
     if (!list)
@@ -78,16 +78,7 @@ static int amdgpu_bo_list_create(struct amdgpu_device *adev,
         return r;
     }

-    /* idr alloc should be called only after initialization of bo list. */
-    mutex_lock(&fpriv->bo_list_lock);
-    r = idr_alloc(&fpriv->bo_list_handles, list, 1, 0, GFP_KERNEL);
-    mutex_unlock(&fpriv->bo_list_lock);
-    if (r < 0) {
-        amdgpu_bo_list_free(list);
-        return r;
-    }
-    *id = r;
-
+    *list_out = list;
     return 0;
 }

@@ -263,55 +254,79 @@ void amdgpu_bo_list_free(struct amdgpu_bo_list *list)
     kfree(list);
 }

-int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
-                         struct drm_file *filp)
+int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
+                                      struct drm_amdgpu_bo_list_entry **info_param)
 {
+    const void __user *uptr = u64_to_user_ptr(in->bo_info_ptr);
     const uint32_t info_size = sizeof(struct drm_amdgpu_bo_list_entry);
-
-    struct amdgpu_device *adev = dev->dev_private;
-    struct amdgpu_fpriv *fpriv = filp->driver_priv;
-    union drm_amdgpu_bo_list *args = data;
-    uint32_t handle = args->in.list_handle;
-    const void __user *uptr = u64_to_user_ptr(args->in.bo_info_ptr);
-
     struct drm_amdgpu_bo_list_entry *info;
-    struct amdgpu_bo_list *list;
-
     int r;

-    info = kvmalloc_array(args->in.bo_number,
-                          sizeof(struct drm_amdgpu_bo_list_entry), GFP_KERNEL);
+    info = kvmalloc_array(in->bo_number, info_size, GFP_KERNEL);
     if (!info)
         return -ENOMEM;

     /* copy the handle array from userspace to a kernel buffer */
     r = -EFAULT;
-    if (likely(info_size == args->in.bo_info_size)) {
-        unsigned long bytes = args->in.bo_number *
-            args->in.bo_info_size;
+    if (likely(info_size == in->bo_info_size)) {
+        unsigned long bytes = in->bo_number *
+            in->bo_info_size;

         if (copy_from_user(info, uptr, bytes))
             goto error_free;

     } else {
-        unsigned long bytes = min(args->in.bo_info_size, info_size);
+        unsigned long bytes = min(in->bo_info_size, info_size);
         unsigned i;

-        memset(info, 0, args->in.bo_number * info_size);
-        for (i = 0; i < args->in.bo_number; ++i) {
+        memset(info, 0, in->bo_number * info_size);
+        for (i = 0; i < in->bo_number; ++i) {
             if (copy_from_user(&info[i], uptr, bytes))
                 goto error_free;

-            uptr += args->in.bo_info_size;
+            uptr += in->bo_info_size;
         }
     }

+    *info_param = info;
+    return 0;
+
+error_free:
+    kvfree(info);
+    return r;
+}
+
+int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
+                         struct drm_file *filp)
+{
+    struct amdgpu_device *adev = dev->dev_private;
+    struct amdgpu_fpriv *fpriv = filp->driver_priv;
+    union drm_amdgpu_bo_list *args = data;
+    uint32_t handle = args->in.list_handle;
+    struct drm_amdgpu_bo_list_entry *info = NULL;
+    struct amdgpu_bo_list *list;
+    int r;
+
+    r = amdgpu_bo_create_list_entry_array(&args->in, &info);
+    if (r)
+        goto error_free;
+
     switch (args->in.operation) {
     case AMDGPU_BO_LIST_OP_CREATE:
         r = amdgpu_bo_list_create(adev, filp, info, args->in.bo_number,
-                                  &handle);
+                                  &list);
         if (r)
             goto error_free;

+        mutex_lock(&fpriv->bo_list_lock);
+        r = idr_alloc(&fpriv->bo_list_handles, list, 1, 0, GFP_KERNEL);
+        mutex_unlock(&fpriv->bo_list_lock);
+        if (r < 0) {
+            amdgpu_bo_list_free(list);
+            return r;
+        }
+
+        handle = r;
         break;

     case AMDGPU_BO_LIST_OP_DESTROY:
@@ -345,6 +360,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
     return 0;

 error_free:
-    kvfree(info);
+    if (info)
+        kvfree(info);
     return r;
 }
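The refactor above separates building a BO list (amdgpu_bo_list_create() now returns the list itself) from publishing its handle, which the ioctl does afterwards via idr_alloc(). That ordering matters because a handle becomes visible to lookups the moment idr_alloc() returns. A generic sketch of the publish-after-init pattern, with hypothetical names:

    struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);
    int id;

    if (!foo)
        return -ENOMEM;
    foo_init(foo);                  /* assumed: fully initialize first */

    mutex_lock(&handle_lock);       /* assumed lock guarding the idr */
    id = idr_alloc(&handles, foo, 1, 0, GFP_KERNEL);
    mutex_unlock(&handle_lock);
    if (id < 0) {
        foo_free(foo);              /* assumed cleanup helper */
        return id;
    }
    /* only now may other threads find foo by id */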
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c

@@ -66,11 +66,35 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
     return 0;
 }

-static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
+static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p,
+                                      struct drm_amdgpu_bo_list_in *data)
 {
+    int r;
+    struct drm_amdgpu_bo_list_entry *info = NULL;
+
+    r = amdgpu_bo_create_list_entry_array(data, &info);
+    if (r)
+        return r;
+
+    r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number,
+                              &p->bo_list);
+    if (r)
+        goto error_free;
+
+    kvfree(info);
+    return 0;
+
+error_free:
+    if (info)
+        kvfree(info);
+
+    return r;
+}
+
+static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs)
+{
     struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
     struct amdgpu_vm *vm = &fpriv->vm;
-    union drm_amdgpu_cs *cs = data;
     uint64_t *chunk_array_user;
     uint64_t *chunk_array;
     unsigned size, num_ibs = 0;
@@ -164,6 +188,19 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)

             break;

+        case AMDGPU_CHUNK_ID_BO_HANDLES:
+            size = sizeof(struct drm_amdgpu_bo_list_in);
+            if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
+                ret = -EINVAL;
+                goto free_partial_kdata;
+            }
+
+            ret = amdgpu_cs_bo_handles_chunk(p, p->chunks[i].kdata);
+            if (ret)
+                goto free_partial_kdata;
+
+            break;
+
         case AMDGPU_CHUNK_ID_DEPENDENCIES:
         case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
         case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
@@ -187,6 +224,10 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
     if (p->uf_entry.robj)
         p->job->uf_addr = uf_offset;
     kfree(chunk_array);
+
+    /* Use this opportunity to fill in task info for the vm */
+    amdgpu_vm_set_task_info(vm);
+
     return 0;

 free_all_kdata:
@@ -258,7 +299,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
         return;
     }

-    total_vram = adev->gmc.real_vram_size - adev->vram_pin_size;
+    total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
     used_vram = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
     free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;

@@ -530,7 +571,12 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,

     INIT_LIST_HEAD(&p->validated);

-    p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
+    /* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
+    if (!p->bo_list)
+        p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
+    else
+        mutex_lock(&p->bo_list->lock);
+
     if (p->bo_list) {
         amdgpu_bo_list_get_list(p->bo_list, &p->validated);
         if (p->bo_list->first_userptr != p->bo_list->num_entries)
@@ -866,11 +912,11 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
 {
     struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
     struct amdgpu_vm *vm = &fpriv->vm;
-    struct amdgpu_ring *ring = p->job->ring;
+    struct amdgpu_ring *ring = p->ring;
     int r;

     /* Only for UVD/VCE VM emulation */
-    if (p->job->ring->funcs->parse_cs) {
+    if (p->ring->funcs->parse_cs) {
         unsigned i, j;

         for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
@@ -928,6 +974,10 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
         r = amdgpu_bo_vm_update_pte(p);
         if (r)
             return r;
+
+        r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
+        if (r)
+            return r;
     }

     return amdgpu_cs_sync_rings(p);
@@ -980,10 +1030,10 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
         }
     }

-    if (parser->job->ring && parser->job->ring != ring)
+    if (parser->ring && parser->ring != ring)
         return -EINVAL;

-    parser->job->ring = ring;
+    parser->ring = ring;

     r = amdgpu_ib_get(adev, vm,
                       ring->funcs->parse_cs ? chunk_ib->ib_bytes : 0,
@@ -1002,11 +1052,11 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,

     /* UVD & VCE fw doesn't support user fences */
     if (parser->job->uf_addr && (
-        parser->job->ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
-        parser->job->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
+        parser->ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
+        parser->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
         return -EINVAL;

-    return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->job->ring->idx);
+    return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->ring->idx);
 }

 static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
@@ -1157,8 +1207,9 @@ static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
 static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
                             union drm_amdgpu_cs *cs)
 {
-    struct amdgpu_ring *ring = p->job->ring;
+    struct amdgpu_ring *ring = p->ring;
     struct drm_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
+    enum drm_sched_priority priority;
     struct amdgpu_job *job;
     unsigned i;
     uint64_t seq;
@@ -1189,7 +1240,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
     }

     job->owner = p->filp;
-    job->fence_ctx = entity->fence_context;
     p->fence = dma_fence_get(&job->base.s_fence->finished);

     r = amdgpu_ctx_add_fence(p->ctx, ring, p->fence, &seq);
@@ -1207,11 +1257,14 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
     job->uf_sequence = seq;

     amdgpu_job_free_resources(job);
-    amdgpu_ring_priority_get(job->ring, job->base.s_priority);

     trace_amdgpu_cs_ioctl(job);
+    priority = job->base.s_priority;
     drm_sched_entity_push_job(&job->base, entity);

+    ring = to_amdgpu_ring(entity->sched);
+    amdgpu_ring_priority_get(ring, priority);
+
     ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
     amdgpu_mn_unlock(p->mn);
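With AMDGPU_CHUNK_ID_BO_HANDLES parsed above, userspace can attach the BO list inline to a CS submission instead of creating a list handle through a separate ioctl first. A hedged userspace-side sketch using the UAPI structures the hunk references (the GEM handles and the surrounding submission setup are assumed):

    struct drm_amdgpu_bo_list_entry entries[2] = {
        { .bo_handle = bo0, .bo_priority = 0 },  /* bo0/bo1: assumed GEM handles */
        { .bo_handle = bo1, .bo_priority = 0 },
    };
    struct drm_amdgpu_bo_list_in bo_list = {
        .bo_number    = 2,
        .bo_info_size = sizeof(entries[0]),
        .bo_info_ptr  = (uintptr_t)entries,
    };
    struct drm_amdgpu_cs_chunk chunk = {
        .chunk_id   = AMDGPU_CHUNK_ID_BO_HANDLES,
        .length_dw  = sizeof(bo_list) / 4,
        .chunk_data = (uintptr_t)&bo_list,
    };
    /* chunk then goes into the chunk array passed to DRM_IOCTL_AMDGPU_CS */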
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c

@@ -90,8 +90,8 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
         if (ring == &adev->gfx.kiq.ring)
             continue;

-        r = drm_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
-                                  rq, &ctx->guilty);
+        r = drm_sched_entity_init(&ctx->rings[i].entity,
+                                  &rq, 1, &ctx->guilty);
         if (r)
             goto failed;
     }
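This hunk is part of the scheduler rework named in the merge description: drm_sched_entity_init() now takes an array of candidate run queues plus a count rather than a single scheduler, which is the hook that later load balancing builds on. A hypothetical sketch of what the array form permits (ring_a/ring_b and entity are placeholders):

    struct drm_sched_rq *rqs[2] = {
        &ring_a->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
        &ring_b->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
    };

    /* an entity that could be placed on either ring's scheduler */
    r = drm_sched_entity_init(&entity, rqs, 2, NULL);
    if (r)
        return r;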
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

@@ -2200,7 +2200,7 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
     case CHIP_VEGA10:
     case CHIP_VEGA12:
     case CHIP_VEGA20:
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
     case CHIP_RAVEN:
 #endif
         return amdgpu_dc != 0;
@@ -2758,11 +2758,10 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
             struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
             r = amdgpu_bo_reserve(aobj, true);
             if (r == 0) {
-                r = amdgpu_bo_pin(aobj,
-                                  AMDGPU_GEM_DOMAIN_VRAM,
-                                  &amdgpu_crtc->cursor_addr);
+                r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
                 if (r != 0)
                     DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
+                amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
                 amdgpu_bo_unreserve(aobj);
             }
         }
@@ -3254,7 +3253,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,

         kthread_park(ring->sched.thread);

-        if (job && job->ring->idx != i)
+        if (job && job->base.sched == &ring->sched)
             continue;

         drm_sched_hw_job_reset(&ring->sched, &job->base);
@@ -3278,7 +3277,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
          * or all rings (in the case @job is NULL)
          * after above amdgpu_reset accomplished
          */
-        if ((!job || job->ring->idx == i) && !r)
+        if ((!job || job->base.sched == &ring->sched) && !r)
             drm_sched_job_recovery(&ring->sched);

         kthread_unpark(ring->sched.thread);
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c

@@ -157,7 +157,6 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
     struct amdgpu_bo *new_abo;
     unsigned long flags;
     u64 tiling_flags;
-    u64 base;
     int i, r;

     work = kzalloc(sizeof *work, GFP_KERNEL);
@@ -189,12 +188,18 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
         goto cleanup;
     }

-    r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev), &base);
+    r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev));
     if (unlikely(r != 0)) {
         DRM_ERROR("failed to pin new abo buffer before flip\n");
         goto unreserve;
     }

+    r = amdgpu_ttm_alloc_gart(&new_abo->tbo);
+    if (unlikely(r != 0)) {
+        DRM_ERROR("%p bind failed\n", new_abo);
+        goto unpin;
+    }
+
     r = reservation_object_get_fences_rcu(new_abo->tbo.resv, &work->excl,
                                           &work->shared_count,
                                           &work->shared);
@@ -206,7 +211,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
     amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags);
     amdgpu_bo_unreserve(new_abo);

-    work->base = base;
+    work->base = amdgpu_bo_gpu_offset(new_abo);
     work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
         amdgpu_get_vblank_counter_kms(dev, work->crtc_id);
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c

@@ -69,9 +69,10 @@
  * - 3.24.0 - Add high priority compute support for gfx9
  * - 3.25.0 - Add support for sensor query info (stable pstate sclk/mclk).
  * - 3.26.0 - GFX9: Process AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE.
+ * - 3.27.0 - Add new chunk to to AMDGPU_CS to enable BO_LIST creation.
  */
 #define KMS_DRIVER_MAJOR    3
-#define KMS_DRIVER_MINOR    26
+#define KMS_DRIVER_MINOR    27
 #define KMS_DRIVER_PATCHLEVEL   0

 int amdgpu_vram_limit = 0;
@@ -103,11 +104,8 @@ int amdgpu_vram_page_split = 512;
 int amdgpu_vm_update_mode = -1;
 int amdgpu_exp_hw_support = 0;
 int amdgpu_dc = -1;
-int amdgpu_dc_log = 0;
 int amdgpu_sched_jobs = 32;
 int amdgpu_sched_hw_submission = 2;
-int amdgpu_no_evict = 0;
-int amdgpu_direct_gma_size = 0;
 uint amdgpu_pcie_gen_cap = 0;
 uint amdgpu_pcie_lane_cap = 0;
 uint amdgpu_cg_mask = 0xffffffff;
@@ -340,9 +338,6 @@ module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
 MODULE_PARM_DESC(dc, "Display Core driver (1 = enable, 0 = disable, -1 = auto (default))");
 module_param_named(dc, amdgpu_dc, int, 0444);

-MODULE_PARM_DESC(dc_log, "Display Core Log Level (0 = minimal (default), 1 = chatty");
-module_param_named(dc_log, amdgpu_dc_log, int, 0444);
-
 /**
  * DOC: sched_jobs (int)
  * Override the max number of jobs supported in the sw queue. The default is 32.
@@ -365,12 +360,6 @@ module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
 MODULE_PARM_DESC(ppfeaturemask, "all power features enabled (default))");
 module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, uint, 0444);

-MODULE_PARM_DESC(no_evict, "Support pinning request from user space (1 = enable, 0 = disable (default))");
-module_param_named(no_evict, amdgpu_no_evict, int, 0444);
-
-MODULE_PARM_DESC(direct_gma_size, "Direct GMA size in megabytes (max 96MB)");
-module_param_named(direct_gma_size, amdgpu_direct_gma_size, int, 0444);
-
 /**
  * DOC: pcie_gen_cap (uint)
  * Override PCIE gen speed capabilities. See the CAIL flags in drivers/gpu/drm/amd/include/amd_pcie.h.
drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c

@@ -168,11 +168,19 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
     }

-    ret = amdgpu_bo_pin(abo, domain, NULL);
+    ret = amdgpu_bo_pin(abo, domain);
     if (ret) {
         amdgpu_bo_unreserve(abo);
         goto out_unref;
     }

+    ret = amdgpu_ttm_alloc_gart(&abo->tbo);
+    if (ret) {
+        amdgpu_bo_unreserve(abo);
+        dev_err(adev->dev, "%p bind failed\n", abo);
+        goto out_unref;
+    }
+
     ret = amdgpu_bo_kmap(abo, NULL);
     amdgpu_bo_unreserve(abo);
     if (ret) {
drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c

@@ -143,14 +143,12 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
  */
 int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev)
 {
-    uint64_t gpu_addr;
     int r;

     r = amdgpu_bo_reserve(adev->gart.robj, false);
     if (unlikely(r != 0))
         return r;
-    r = amdgpu_bo_pin(adev->gart.robj,
-                      AMDGPU_GEM_DOMAIN_VRAM, &gpu_addr);
+    r = amdgpu_bo_pin(adev->gart.robj, AMDGPU_GEM_DOMAIN_VRAM);
     if (r) {
         amdgpu_bo_unreserve(adev->gart.robj);
         return r;
@@ -159,7 +157,7 @@ int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev)
     if (r)
         amdgpu_bo_unpin(adev->gart.robj);
     amdgpu_bo_unreserve(adev->gart.robj);
-    adev->gart.table_addr = gpu_addr;
+    adev->gart.table_addr = amdgpu_bo_gpu_offset(adev->gart.robj);
     return r;
 }
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c

@@ -139,7 +139,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
     /* ring tests don't use a job */
     if (job) {
         vm = job->vm;
-        fence_ctx = job->fence_ctx;
+        fence_ctx = job->base.s_fence->scheduled.context;
     } else {
         vm = NULL;
         fence_ctx = 0;
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c

@@ -30,14 +30,14 @@

 static void amdgpu_job_timedout(struct drm_sched_job *s_job)
 {
-    struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
+    struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
+    struct amdgpu_job *job = to_amdgpu_job(s_job);

-    DRM_ERROR("ring %s timeout, last signaled seq=%u, last emitted seq=%u\n",
-              job->base.sched->name,
-              atomic_read(&job->ring->fence_drv.last_seq),
-              job->ring->fence_drv.sync_seq);
+    DRM_ERROR("ring %s timeout, signaled seq=%u, emitted seq=%u\n",
+              job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
+              ring->fence_drv.sync_seq);

-    amdgpu_device_gpu_recover(job->adev, job, false);
+    amdgpu_device_gpu_recover(ring->adev, job, false);
 }

 int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
@@ -54,7 +54,11 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
     if (!*job)
         return -ENOMEM;

-    (*job)->adev = adev;
+    /*
+     * Initialize the scheduler to at least some ring so that we always
+     * have a pointer to adev.
+     */
+    (*job)->base.sched = &adev->rings[0]->sched;
     (*job)->vm = vm;
     (*job)->ibs = (void *)&(*job)[1];
     (*job)->num_ibs = num_ibs;
@@ -86,6 +90,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,

 void amdgpu_job_free_resources(struct amdgpu_job *job)
 {
+    struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
     struct dma_fence *f;
     unsigned i;

@@ -93,14 +98,15 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
     f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;

     for (i = 0; i < job->num_ibs; ++i)
-        amdgpu_ib_free(job->adev, &job->ibs[i], f);
+        amdgpu_ib_free(ring->adev, &job->ibs[i], f);
 }

 static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
 {
-    struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
+    struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
+    struct amdgpu_job *job = to_amdgpu_job(s_job);

-    amdgpu_ring_priority_put(job->ring, s_job->s_priority);
+    amdgpu_ring_priority_put(ring, s_job->s_priority);
     dma_fence_put(job->fence);
     amdgpu_sync_free(&job->sync);
     amdgpu_sync_free(&job->sched_sync);
@@ -117,50 +123,68 @@ void amdgpu_job_free(struct amdgpu_job *job)
     kfree(job);
 }

-int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
-                      struct drm_sched_entity *entity, void *owner,
-                      struct dma_fence **f)
+int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
+                      void *owner, struct dma_fence **f)
 {
+    enum drm_sched_priority priority;
+    struct amdgpu_ring *ring;
     int r;
-    job->ring = ring;

     if (!f)
         return -EINVAL;

-    r = drm_sched_job_init(&job->base, &ring->sched, entity, owner);
+    r = drm_sched_job_init(&job->base, entity->sched, entity, owner);
     if (r)
         return r;

     job->owner = owner;
-    job->fence_ctx = entity->fence_context;
     *f = dma_fence_get(&job->base.s_fence->finished);
     amdgpu_job_free_resources(job);
-    amdgpu_ring_priority_get(job->ring, job->base.s_priority);
+    priority = job->base.s_priority;
     drm_sched_entity_push_job(&job->base, entity);

+    ring = to_amdgpu_ring(entity->sched);
+    amdgpu_ring_priority_get(ring, priority);
+
     return 0;
 }

+int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
+                             struct dma_fence **fence)
+{
+    int r;
+
+    job->base.sched = &ring->sched;
+    r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, NULL, fence);
+    job->fence = dma_fence_get(*fence);
+    if (r)
+        return r;
+
+    amdgpu_job_free(job);
+    return 0;
+}
+
 static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
                                                struct drm_sched_entity *s_entity)
 {
+    struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->sched);
     struct amdgpu_job *job = to_amdgpu_job(sched_job);
     struct amdgpu_vm *vm = job->vm;
+    struct dma_fence *fence;
     bool explicit = false;
     int r;
-    struct dma_fence *fence = amdgpu_sync_get_fence(&job->sync, &explicit);

+    fence = amdgpu_sync_get_fence(&job->sync, &explicit);
     if (fence && explicit) {
         if (drm_sched_dependency_optimized(fence, s_entity)) {
-            r = amdgpu_sync_fence(job->adev, &job->sched_sync, fence, false);
+            r = amdgpu_sync_fence(ring->adev, &job->sched_sync,
+                                  fence, false);
             if (r)
-                DRM_ERROR("Error adding fence to sync (%d)\n", r);
+                DRM_ERROR("Error adding fence (%d)\n", r);
         }
     }

     while (fence == NULL && vm && !job->vmid) {
-        struct amdgpu_ring *ring = job->ring;
-
         r = amdgpu_vmid_grab(vm, ring, &job->sync,
                              &job->base.s_fence->finished,
                              job);
@@ -175,30 +199,25 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,

 static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
 {
+    struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
     struct dma_fence *fence = NULL, *finished;
-    struct amdgpu_device *adev;
     struct amdgpu_job *job;
     int r;

-    if (!sched_job) {
-        DRM_ERROR("job is null\n");
-        return NULL;
-    }
     job = to_amdgpu_job(sched_job);
     finished = &job->base.s_fence->finished;
-    adev = job->adev;

     BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));

     trace_amdgpu_sched_run_job(job);

-    if (job->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
+    if (job->vram_lost_counter != atomic_read(&ring->adev->vram_lost_counter))
         dma_fence_set_error(finished, -ECANCELED);/* skip IB as well if VRAM lost */

     if (finished->error < 0) {
         DRM_INFO("Skip scheduling IBs!\n");
     } else {
-        r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job,
+        r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
                                &fence);
         if (r)
             DRM_ERROR("Error scheduling IBs (%d)\n", r);
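amdgpu_job_submit() above drops its ring argument — the ring is now derived from the entity's scheduler — and the new amdgpu_job_submit_direct() serves callers that bypass the scheduler entirely (the amdgpu_ttm.c hunk further down converts amdgpu_copy_buffer() to exactly these two paths). A sketch of the two call shapes, using the entity and owner constant that appear elsewhere in this diff:

    struct dma_fence *fence;
    int r;

    /* scheduled submission: no ring parameter, it comes from the entity */
    r = amdgpu_job_submit(job, &adev->mman.entity,
                          AMDGPU_FENCE_OWNER_UNDEFINED, &fence);

    /* direct submission: skip the scheduler and name the ring explicitly */
    r = amdgpu_job_submit_direct(job, ring, &fence);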
74  drivers/gpu/drm/amd/amdgpu/amdgpu_job.h  Normal file
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_JOB_H__
+#define __AMDGPU_JOB_H__
+
+/* bit set means command submit involves a preamble IB */
+#define AMDGPU_PREAMBLE_IB_PRESENT          (1 << 0)
+/* bit set means preamble IB is first presented in belonging context */
+#define AMDGPU_PREAMBLE_IB_PRESENT_FIRST    (1 << 1)
+/* bit set means context switch occured */
+#define AMDGPU_HAVE_CTX_SWITCH              (1 << 2)
+
+#define to_amdgpu_job(sched_job) \
+        container_of((sched_job), struct amdgpu_job, base)
+
+struct amdgpu_fence;
+
+struct amdgpu_job {
+    struct drm_sched_job    base;
+    struct amdgpu_vm        *vm;
+    struct amdgpu_sync      sync;
+    struct amdgpu_sync      sched_sync;
+    struct amdgpu_ib        *ibs;
+    struct dma_fence        *fence; /* the hw fence */
+    uint32_t                preamble_status;
+    uint32_t                num_ibs;
+    void                    *owner;
+    bool                    vm_needs_flush;
+    uint64_t                vm_pd_addr;
+    unsigned                vmid;
+    unsigned                pasid;
+    uint32_t                gds_base, gds_size;
+    uint32_t                gws_base, gws_size;
+    uint32_t                oa_base, oa_size;
+    uint32_t                vram_lost_counter;
+
+    /* user fence handling */
+    uint64_t                uf_addr;
+    uint64_t                uf_sequence;
+
+};
+
+int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
+                     struct amdgpu_job **job, struct amdgpu_vm *vm);
+int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
+                             struct amdgpu_job **job);
+
+void amdgpu_job_free_resources(struct amdgpu_job *job);
+void amdgpu_job_free(struct amdgpu_job *job);
+int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
+                      void *owner, struct dma_fence **f);
+int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
+                             struct dma_fence **fence);
+#endif
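The to_amdgpu_job() macro in the new header is the usual container_of() downcast: given a pointer to the embedded drm_sched_job, it recovers the enclosing amdgpu_job. A self-contained illustration of the mechanism with toy types:

    #include <linux/kernel.h>

    struct inner {
        int x;
    };

    struct outer {
        int before;
        struct inner member;        /* embedded by value, not a pointer */
    };

    /* given &o->member, compute o by subtracting the member's offset */
    static struct outer *to_outer(struct inner *p)
    {
        return container_of(p, struct outer, member);
    }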
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c

@@ -501,13 +501,13 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
     case AMDGPU_INFO_VRAM_GTT: {
         struct drm_amdgpu_info_vram_gtt vram_gtt;

-        vram_gtt.vram_size = adev->gmc.real_vram_size;
-        vram_gtt.vram_size -= adev->vram_pin_size;
-        vram_gtt.vram_cpu_accessible_size = adev->gmc.visible_vram_size;
-        vram_gtt.vram_cpu_accessible_size -= (adev->vram_pin_size - adev->invisible_pin_size);
+        vram_gtt.vram_size = adev->gmc.real_vram_size -
+            atomic64_read(&adev->vram_pin_size);
+        vram_gtt.vram_cpu_accessible_size = adev->gmc.visible_vram_size -
+            atomic64_read(&adev->visible_pin_size);
         vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
         vram_gtt.gtt_size *= PAGE_SIZE;
-        vram_gtt.gtt_size -= adev->gart_pin_size;
+        vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size);
         return copy_to_user(out, &vram_gtt,
                             min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
     }
@@ -516,17 +516,16 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)

         memset(&mem, 0, sizeof(mem));
         mem.vram.total_heap_size = adev->gmc.real_vram_size;
-        mem.vram.usable_heap_size =
-            adev->gmc.real_vram_size - adev->vram_pin_size;
+        mem.vram.usable_heap_size = adev->gmc.real_vram_size -
+            atomic64_read(&adev->vram_pin_size);
         mem.vram.heap_usage =
             amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
         mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;

         mem.cpu_accessible_vram.total_heap_size =
             adev->gmc.visible_vram_size;
-        mem.cpu_accessible_vram.usable_heap_size =
-            adev->gmc.visible_vram_size -
-            (adev->vram_pin_size - adev->invisible_pin_size);
+        mem.cpu_accessible_vram.usable_heap_size = adev->gmc.visible_vram_size -
+            atomic64_read(&adev->visible_pin_size);
         mem.cpu_accessible_vram.heap_usage =
             amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
         mem.cpu_accessible_vram.max_allocation =
@@ -534,8 +533,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)

         mem.gtt.total_heap_size = adev->mman.bdev.man[TTM_PL_TT].size;
         mem.gtt.total_heap_size *= PAGE_SIZE;
-        mem.gtt.usable_heap_size = mem.gtt.total_heap_size
-            - adev->gart_pin_size;
+        mem.gtt.usable_heap_size = mem.gtt.total_heap_size -
+            atomic64_read(&adev->gart_pin_size);
         mem.gtt.heap_usage =
             amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
         mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c

@@ -63,11 +63,35 @@ static bool amdgpu_need_backup(struct amdgpu_device *adev)
     return true;
 }

+/**
+ * amdgpu_bo_subtract_pin_size - Remove BO from pin_size accounting
+ *
+ * @bo: &amdgpu_bo buffer object
+ *
+ * This function is called when a BO stops being pinned, and updates the
+ * &amdgpu_device pin_size values accordingly.
+ */
+static void amdgpu_bo_subtract_pin_size(struct amdgpu_bo *bo)
+{
+    struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+
+    if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
+        atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
+        atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
+                     &adev->visible_pin_size);
+    } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
+        atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
+    }
+}
+
 static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 {
     struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
     struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);

+    if (WARN_ON_ONCE(bo->pin_count > 0))
+        amdgpu_bo_subtract_pin_size(bo);
+
     if (bo->kfd_bo)
         amdgpu_amdkfd_unreserve_system_memory_limit(bo);

@@ -252,22 +276,33 @@ int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
         goto error_free;
     }

-    r = amdgpu_bo_pin(*bo_ptr, domain, gpu_addr);
+    r = amdgpu_bo_pin(*bo_ptr, domain);
     if (r) {
         dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
         goto error_unreserve;
     }

+    r = amdgpu_ttm_alloc_gart(&(*bo_ptr)->tbo);
+    if (r) {
+        dev_err(adev->dev, "%p bind failed\n", *bo_ptr);
+        goto error_unpin;
+    }
+
+    if (gpu_addr)
+        *gpu_addr = amdgpu_bo_gpu_offset(*bo_ptr);
+
     if (cpu_addr) {
         r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
         if (r) {
             dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
-            goto error_unreserve;
+            goto error_unpin;
         }
     }

     return 0;

+error_unpin:
+    amdgpu_bo_unpin(*bo_ptr);
 error_unreserve:
     amdgpu_bo_unreserve(*bo_ptr);

@@ -817,7 +852,6 @@ void amdgpu_bo_unref(struct amdgpu_bo **bo)
  * @domain: domain to be pinned to
  * @min_offset: the start of requested address range
  * @max_offset: the end of requested address range
- * @gpu_addr: GPU offset of the &amdgpu_bo buffer object
  *
  * Pins the buffer object according to requested domain and address range. If
  * the memory is unbound gart memory, binds the pages into gart table. Adjusts
@@ -835,8 +869,7 @@ void amdgpu_bo_unref(struct amdgpu_bo **bo)
  * 0 for success or a negative error code on failure.
  */
 int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
-                             u64 min_offset, u64 max_offset,
-                             u64 *gpu_addr)
+                             u64 min_offset, u64 max_offset)
 {
     struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
     struct ttm_operation_ctx ctx = { false, false };
@@ -868,9 +901,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
             return -EINVAL;

         bo->pin_count++;
-        if (gpu_addr)
-            *gpu_addr = amdgpu_bo_gpu_offset(bo);
-
         if (max_offset != 0) {
             u64 domain_start = bo->tbo.bdev->man[mem_type].gpu_offset;
@@ -905,22 +936,15 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
         goto error;
     }

-    r = amdgpu_ttm_alloc_gart(&bo->tbo);
-    if (unlikely(r)) {
-        dev_err(adev->dev, "%p bind failed\n", bo);
-        goto error;
-    }
-
     bo->pin_count = 1;
-    if (gpu_addr != NULL)
-        *gpu_addr = amdgpu_bo_gpu_offset(bo);

     domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
     if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
-        adev->vram_pin_size += amdgpu_bo_size(bo);
-        adev->invisible_pin_size += amdgpu_vram_mgr_bo_invisible_size(bo);
+        atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
+        atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
+                     &adev->visible_pin_size);
     } else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
-        adev->gart_pin_size += amdgpu_bo_size(bo);
+        atomic64_add(amdgpu_bo_size(bo), &adev->gart_pin_size);
     }

 error:
@@ -931,7 +955,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
  * amdgpu_bo_pin - pin an &amdgpu_bo buffer object
  * @bo: &amdgpu_bo buffer object to be pinned
  * @domain: domain to be pinned to
- * @gpu_addr: GPU offset of the &amdgpu_bo buffer object
  *
  * A simple wrapper to amdgpu_bo_pin_restricted().
  * Provides a simpler API for buffers that do not have any strict restrictions
@@ -940,9 +963,9 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
  * Returns:
  * 0 for success or a negative error code on failure.
  */
-int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
+int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
 {
-    return amdgpu_bo_pin_restricted(bo, domain, 0, 0, gpu_addr);
+    return amdgpu_bo_pin_restricted(bo, domain, 0, 0);
 }

 /**
@@ -969,12 +992,7 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
     if (bo->pin_count)
         return 0;

-    if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
-        adev->vram_pin_size -= amdgpu_bo_size(bo);
-        adev->invisible_pin_size -= amdgpu_vram_mgr_bo_invisible_size(bo);
-    } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
-        adev->gart_pin_size -= amdgpu_bo_size(bo);
-    }
+    amdgpu_bo_subtract_pin_size(bo);

     for (i = 0; i < bo->placement.num_placement; i++) {
         bo->placements[i].lpfn = 0;
drivers/gpu/drm/amd/amdgpu/amdgpu_object.h

@@ -252,10 +252,9 @@ void *amdgpu_bo_kptr(struct amdgpu_bo *bo);
 void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
 struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
 void amdgpu_bo_unref(struct amdgpu_bo **bo);
-int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr);
+int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain);
 int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
-                             u64 min_offset, u64 max_offset,
-                             u64 *gpu_addr);
+                             u64 min_offset, u64 max_offset);
 int amdgpu_bo_unpin(struct amdgpu_bo *bo);
 int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
 int amdgpu_bo_init(struct amdgpu_device *adev);
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c

@@ -606,6 +606,42 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
     return snprintf(buf, PAGE_SIZE, "\n");
 }

+/*
+ * Worst case: 32 bits individually specified, in octal at 12 characters
+ * per line (+1 for \n).
+ */
+#define AMDGPU_MASK_BUF_MAX (32 * 13)
+
+static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
+{
+    int ret;
+    long level;
+    char *sub_str = NULL;
+    char *tmp;
+    char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
+    const char delimiter[3] = {' ', '\n', '\0'};
+    size_t bytes;
+
+    *mask = 0;
+
+    bytes = min(count, sizeof(buf_cpy) - 1);
+    memcpy(buf_cpy, buf, bytes);
+    buf_cpy[bytes] = '\0';
+    tmp = buf_cpy;
+    while (tmp[0]) {
+        sub_str = strsep(&tmp, delimiter);
+        if (strlen(sub_str)) {
+            ret = kstrtol(sub_str, 0, &level);
+            if (ret)
+                return -EINVAL;
+            *mask |= 1 << level;
+        } else
+            break;
+    }
+
+    return 0;
+}
+
 static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
                                       struct device_attribute *attr,
                                       const char *buf,
@@ -614,32 +650,15 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
     struct drm_device *ddev = dev_get_drvdata(dev);
     struct amdgpu_device *adev = ddev->dev_private;
     int ret;
-    long level;
     uint32_t mask = 0;
-    char *sub_str = NULL;
-    char *tmp;
-    char buf_cpy[count];
-    const char delimiter[3] = {' ', '\n', '\0'};
-
-    memcpy(buf_cpy, buf, count+1);
-    tmp = buf_cpy;
-    while (tmp[0]) {
-        sub_str = strsep(&tmp, delimiter);
-        if (strlen(sub_str)) {
-            ret = kstrtol(sub_str, 0, &level);

-            if (ret) {
-                count = -EINVAL;
-                goto fail;
-            }
-            mask |= 1 << level;
-        } else
-            break;
-    }
+    ret = amdgpu_read_mask(buf, count, &mask);
+    if (ret)
+        return ret;
+
     if (adev->powerplay.pp_funcs->force_clock_level)
         amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);

-fail:
     return count;
 }

@@ -664,32 +683,15 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
     struct drm_device *ddev = dev_get_drvdata(dev);
     struct amdgpu_device *adev = ddev->dev_private;
     int ret;
-    long level;
     uint32_t mask = 0;
-    char *sub_str = NULL;
-    char *tmp;
-    char buf_cpy[count];
-    const char delimiter[3] = {' ', '\n', '\0'};
-
-    memcpy(buf_cpy, buf, count+1);
-    tmp = buf_cpy;
-    while (tmp[0]) {
-        sub_str = strsep(&tmp, delimiter);
-        if (strlen(sub_str)) {
-            ret = kstrtol(sub_str, 0, &level);

-            if (ret) {
-                count = -EINVAL;
-                goto fail;
-            }
-            mask |= 1 << level;
-        } else
-            break;
-    }
+    ret = amdgpu_read_mask(buf, count, &mask);
+    if (ret)
+        return ret;
+
     if (adev->powerplay.pp_funcs->force_clock_level)
         amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);

-fail:
     return count;
 }

@@ -714,33 +716,15 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
     struct drm_device *ddev = dev_get_drvdata(dev);
     struct amdgpu_device *adev = ddev->dev_private;
     int ret;
-    long level;
     uint32_t mask = 0;
-    char *sub_str = NULL;
-    char *tmp;
-    char buf_cpy[count];
-    const char delimiter[3] = {' ', '\n', '\0'};
-
-    memcpy(buf_cpy, buf, count+1);
-    tmp = buf_cpy;
+    ret = amdgpu_read_mask(buf, count, &mask);
+    if (ret)
+        return ret;

-    while (tmp[0]) {
-        sub_str = strsep(&tmp, delimiter);
-        if (strlen(sub_str)) {
-            ret = kstrtol(sub_str, 0, &level);
-
-            if (ret) {
-                count = -EINVAL;
-                goto fail;
-            }
-            mask |= 1 << level;
-        } else
-            break;
-    }
     if (adev->powerplay.pp_funcs->force_clock_level)
         amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);

-fail:
     return count;
 }
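The new amdgpu_read_mask() above lets the three pp_dpm_* store handlers share one parser with a fixed worst-case buffer (AMDGPU_MASK_BUF_MAX) instead of each stacking a variable-length array sized by the user's write. It turns a space- or newline-separated list of level indices into a bitmask; for example, writing "0 2 5" sets bits 0, 2 and 5:

    /* "0 2 5" -> (1 << 0) | (1 << 2) | (1 << 5) = 0x25 */
    uint32_t mask;
    int ret = amdgpu_read_mask("0 2 5", 5, &mask); /* ret == 0, mask == 0x25 */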
drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c

@@ -232,7 +232,7 @@ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
     }

     /* pin buffer into GTT */
-    r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
+    r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
     if (r)
         goto error_unreserve;
|
@ -211,7 +211,8 @@ void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
|
|||
if (!ring->funcs->set_priority)
|
||||
return;
|
||||
|
||||
atomic_inc(&ring->num_jobs[priority]);
|
||||
if (atomic_inc_return(&ring->num_jobs[priority]) <= 0)
|
||||
return;
|
||||
|
||||
mutex_lock(&ring->priority_mutex);
|
||||
if (priority <= ring->priority)
|
||||
|
|
|
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h

@@ -44,6 +44,8 @@
 #define AMDGPU_FENCE_FLAG_INT           (1 << 1)
 #define AMDGPU_FENCE_FLAG_TC_WB_ONLY    (1 << 2)

+#define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched)
+
 enum amdgpu_ring_type {
     AMDGPU_RING_TYPE_GFX,
     AMDGPU_RING_TYPE_COMPUTE,
drivers/gpu/drm/amd/amdgpu/amdgpu_test.c

@@ -76,11 +76,12 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
     r = amdgpu_bo_reserve(vram_obj, false);
     if (unlikely(r != 0))
         goto out_unref;
-    r = amdgpu_bo_pin(vram_obj, AMDGPU_GEM_DOMAIN_VRAM, &vram_addr);
+    r = amdgpu_bo_pin(vram_obj, AMDGPU_GEM_DOMAIN_VRAM);
     if (r) {
         DRM_ERROR("Failed to pin VRAM object\n");
         goto out_unres;
     }
+    vram_addr = amdgpu_bo_gpu_offset(vram_obj);
     for (i = 0; i < n; i++) {
         void *gtt_map, *vram_map;
         void **gart_start, **gart_end;
@@ -97,11 +98,17 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
         r = amdgpu_bo_reserve(gtt_obj[i], false);
         if (unlikely(r != 0))
             goto out_lclean_unref;
-        r = amdgpu_bo_pin(gtt_obj[i], AMDGPU_GEM_DOMAIN_GTT, &gart_addr);
+        r = amdgpu_bo_pin(gtt_obj[i], AMDGPU_GEM_DOMAIN_GTT);
         if (r) {
             DRM_ERROR("Failed to pin GTT object %d\n", i);
             goto out_lclean_unres;
         }
+        r = amdgpu_ttm_alloc_gart(&gtt_obj[i]->tbo);
+        if (r) {
+            DRM_ERROR("%p bind failed\n", gtt_obj[i]);
+            goto out_lclean_unpin;
+        }
+        gart_addr = amdgpu_bo_gpu_offset(gtt_obj[i]);

         r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
         if (r) {
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h

@@ -150,10 +150,10 @@ TRACE_EVENT(amdgpu_cs,

         TP_fast_assign(
                __entry->bo_list = p->bo_list;
-               __entry->ring = p->job->ring->idx;
+               __entry->ring = p->ring->idx;
                __entry->dw = p->job->ibs[i].length_dw;
                __entry->fences = amdgpu_fence_count_emitted(
-                    p->job->ring);
+                    p->ring);
         ),
         TP_printk("bo_list=%p, ring=%u, dw=%u, fences=%u",
               __entry->bo_list, __entry->ring, __entry->dw,
@@ -178,7 +178,7 @@ TRACE_EVENT(amdgpu_cs_ioctl,
                __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
                __entry->context = job->base.s_fence->finished.context;
                __entry->seqno = job->base.s_fence->finished.seqno;
-               __entry->ring_name = job->ring->name;
+               __entry->ring_name = to_amdgpu_ring(job->base.sched)->name;
                __entry->num_ibs = job->num_ibs;
         ),
         TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
@@ -203,7 +203,7 @@ TRACE_EVENT(amdgpu_sched_run_job,
                __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
                __entry->context = job->base.s_fence->finished.context;
                __entry->seqno = job->base.s_fence->finished.seqno;
-               __entry->ring_name = job->ring->name;
+               __entry->ring_name = to_amdgpu_ring(job->base.sched)->name;
                __entry->num_ibs = job->num_ibs;
         ),
         TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c

@@ -104,8 +104,6 @@ static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
 static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
 {
 	struct drm_global_reference *global_ref;
-	struct amdgpu_ring *ring;
-	struct drm_sched_rq *rq;
 	int r;
 
 	/* ensure reference is false in case init fails */
@@ -138,21 +136,10 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
 
 	mutex_init(&adev->mman.gtt_window_lock);
 
-	ring = adev->mman.buffer_funcs_ring;
-	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
-	r = drm_sched_entity_init(&ring->sched, &adev->mman.entity,
-				  rq, NULL);
-	if (r) {
-		DRM_ERROR("Failed setting up TTM BO move run queue.\n");
-		goto error_entity;
-	}
-
 	adev->mman.mem_global_referenced = true;
 
 	return 0;
 
-error_entity:
-	drm_global_item_unref(&adev->mman.bo_global_ref.ref);
 error_bo:
 	drm_global_item_unref(&adev->mman.mem_global_ref);
 error_mem:
@@ -162,8 +149,6 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
 static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
 {
 	if (adev->mman.mem_global_referenced) {
-		drm_sched_entity_destroy(adev->mman.entity.sched,
-					 &adev->mman.entity);
 		mutex_destroy(&adev->mman.gtt_window_lock);
 		drm_global_item_unref(&adev->mman.bo_global_ref.ref);
 		drm_global_item_unref(&adev->mman.mem_global_ref);
@@ -1695,7 +1680,7 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
 					  AMDGPU_GEM_DOMAIN_VRAM,
 					  adev->fw_vram_usage.start_offset,
 					  (adev->fw_vram_usage.start_offset +
-					   adev->fw_vram_usage.size), NULL);
+					   adev->fw_vram_usage.size));
 		if (r)
 			goto error_pin;
 		r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
@@ -1921,10 +1906,29 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 {
 	struct ttm_mem_type_manager *man = &adev->mman.bdev.man[TTM_PL_VRAM];
 	uint64_t size;
+	int r;
 
-	if (!adev->mman.initialized || adev->in_gpu_reset)
+	if (!adev->mman.initialized || adev->in_gpu_reset ||
+	    adev->mman.buffer_funcs_enabled == enable)
 		return;
 
+	if (enable) {
+		struct amdgpu_ring *ring;
+		struct drm_sched_rq *rq;
+
+		ring = adev->mman.buffer_funcs_ring;
+		rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+		r = drm_sched_entity_init(&adev->mman.entity, &rq, 1, NULL);
+		if (r) {
+			DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
+				  r);
+			return;
+		}
+	} else {
+		drm_sched_entity_destroy(adev->mman.entity.sched,
+					 &adev->mman.entity);
+	}
+
 	/* this just adjusts TTM size idea, which sets lpfn to the correct value */
 	if (enable)
 		size = adev->gmc.real_vram_size;
@@ -2002,7 +2006,7 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
 	if (r)
 		goto error_free;
 
-	r = amdgpu_job_submit(job, ring, &adev->mman.entity,
+	r = amdgpu_job_submit(job, &adev->mman.entity,
 			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
 	if (r)
 		goto error_free;
@@ -2071,24 +2075,19 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
 
 	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
 	WARN_ON(job->ibs[0].length_dw > num_dw);
-	if (direct_submit) {
-		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs,
-				       NULL, fence);
-		job->fence = dma_fence_get(*fence);
-		if (r)
-			DRM_ERROR("Error scheduling IBs (%d)\n", r);
-		amdgpu_job_free(job);
-	} else {
-		r = amdgpu_job_submit(job, ring, &adev->mman.entity,
-				      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
-		if (r)
-			goto error_free;
-	}
+	if (direct_submit)
+		r = amdgpu_job_submit_direct(job, ring, fence);
+	else
+		r = amdgpu_job_submit(job, &adev->mman.entity,
+				      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
+	if (r)
+		goto error_free;
 
 	return r;
 
 error_free:
 	amdgpu_job_free(job);
+	DRM_ERROR("Error scheduling IBs (%d)\n", r);
 	return r;
 }
@@ -2171,7 +2170,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 
 	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
 	WARN_ON(job->ibs[0].length_dw > num_dw);
-	r = amdgpu_job_submit(job, ring, &adev->mman.entity,
+	r = amdgpu_job_submit(job, &adev->mman.entity,
 			      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
 	if (r)
 		goto error_free;
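All drm_sched_entity_init() call sites in this merge move to the reworked scheduler interface: rather than binding an entity to one scheduler plus a run-queue pointer, the caller now passes an array of candidate run queues, which is the groundwork for load balancing. A sketch of the before/after shapes, assuming the 4.19-era prototype drm_sched_entity_init(entity, rq_list, num_rq_list, guilty):

    /* old: entity tied to one scheduler's run queue */
    r = drm_sched_entity_init(&ring->sched, &adev->mman.entity, rq, NULL);

    /* new: entity given a list of run queues (length 1 here) */
    struct drm_sched_rq *rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
    r = drm_sched_entity_init(&adev->mman.entity, &rq, 1, NULL);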
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h

@@ -73,7 +73,7 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem);
 uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man);
 int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man);
 
-u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo);
+u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo);
 uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
 uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c

@@ -263,21 +263,20 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 			dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
 			return r;
 		}
-
-		ring = &adev->uvd.inst[j].ring;
-		rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-		r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity,
-					  rq, NULL);
-		if (r != 0) {
-			DRM_ERROR("Failed setting up UVD(%d) run queue.\n", j);
-			return r;
-		}
-
-		for (i = 0; i < adev->uvd.max_handles; ++i) {
-			atomic_set(&adev->uvd.inst[j].handles[i], 0);
-			adev->uvd.inst[j].filp[i] = NULL;
-		}
 	}
+
+	ring = &adev->uvd.inst[0].ring;
+	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+	r = drm_sched_entity_init(&adev->uvd.entity, &rq, 1, NULL);
+	if (r) {
+		DRM_ERROR("Failed setting up UVD kernel entity.\n");
+		return r;
+	}
+	for (i = 0; i < adev->uvd.max_handles; ++i) {
+		atomic_set(&adev->uvd.handles[i], 0);
+		adev->uvd.filp[i] = NULL;
+	}
 
 	/* from uvd v5.0 HW addressing capacity increased to 64 bits */
 	if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
 		adev->uvd.address_64_bit = true;
@@ -306,11 +305,12 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 {
 	int i, j;
 
+	drm_sched_entity_destroy(&adev->uvd.inst->ring.sched,
+				 &adev->uvd.entity);
+
 	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
 		kfree(adev->uvd.inst[j].saved_bo);
 
-		drm_sched_entity_destroy(&adev->uvd.inst[j].ring.sched, &adev->uvd.inst[j].entity);
-
 		amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo,
 				      &adev->uvd.inst[j].gpu_addr,
 				      (void **)&adev->uvd.inst[j].cpu_addr);
@@ -333,20 +333,20 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
 
 	cancel_delayed_work_sync(&adev->uvd.idle_work);
 
+	/* only valid for physical mode */
+	if (adev->asic_type < CHIP_POLARIS10) {
+		for (i = 0; i < adev->uvd.max_handles; ++i)
+			if (atomic_read(&adev->uvd.handles[i]))
+				break;
+
+		if (i == adev->uvd.max_handles)
+			return 0;
+	}
+
 	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
 		if (adev->uvd.inst[j].vcpu_bo == NULL)
 			continue;
 
-		/* only valid for physical mode */
-		if (adev->asic_type < CHIP_POLARIS10) {
-			for (i = 0; i < adev->uvd.max_handles; ++i)
-				if (atomic_read(&adev->uvd.inst[j].handles[i]))
-					break;
-
-			if (i == adev->uvd.max_handles)
-				continue;
-		}
-
 		size = amdgpu_bo_size(adev->uvd.inst[j].vcpu_bo);
 		ptr = adev->uvd.inst[j].cpu_addr;
@@ -398,30 +398,27 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
 
 void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
 {
-	struct amdgpu_ring *ring;
-	int i, j, r;
+	struct amdgpu_ring *ring = &adev->uvd.inst[0].ring;
+	int i, r;
 
-	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
-		ring = &adev->uvd.inst[j].ring;
+	for (i = 0; i < adev->uvd.max_handles; ++i) {
+		uint32_t handle = atomic_read(&adev->uvd.handles[i]);
 
-		for (i = 0; i < adev->uvd.max_handles; ++i) {
-			uint32_t handle = atomic_read(&adev->uvd.inst[j].handles[i]);
-			if (handle != 0 && adev->uvd.inst[j].filp[i] == filp) {
-				struct dma_fence *fence;
-
-				r = amdgpu_uvd_get_destroy_msg(ring, handle,
-							       false, &fence);
-				if (r) {
-					DRM_ERROR("Error destroying UVD(%d) %d!\n", j, r);
-					continue;
-				}
-
-				dma_fence_wait(fence, false);
-				dma_fence_put(fence);
-
-				adev->uvd.inst[j].filp[i] = NULL;
-				atomic_set(&adev->uvd.inst[j].handles[i], 0);
-			}
+		if (handle != 0 && adev->uvd.filp[i] == filp) {
+			struct dma_fence *fence;
+
+			r = amdgpu_uvd_get_destroy_msg(ring, handle, false,
+						       &fence);
+			if (r) {
+				DRM_ERROR("Error destroying UVD %d!\n", r);
+				continue;
+			}
+
+			dma_fence_wait(fence, false);
+			dma_fence_put(fence);
+
+			adev->uvd.filp[i] = NULL;
+			atomic_set(&adev->uvd.handles[i], 0);
 		}
 	}
 }
@@ -696,16 +693,15 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
 	void *ptr;
 	long r;
 	int i;
-	uint32_t ip_instance = ctx->parser->job->ring->me;
 
 	if (offset & 0x3F) {
-		DRM_ERROR("UVD(%d) messages must be 64 byte aligned!\n", ip_instance);
+		DRM_ERROR("UVD messages must be 64 byte aligned!\n");
 		return -EINVAL;
 	}
 
 	r = amdgpu_bo_kmap(bo, &ptr);
 	if (r) {
-		DRM_ERROR("Failed mapping the UVD(%d) message (%ld)!\n", ip_instance, r);
+		DRM_ERROR("Failed mapping the UVD) message (%ld)!\n", r);
 		return r;
 	}
@@ -715,7 +711,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
 	handle = msg[2];
 
 	if (handle == 0) {
-		DRM_ERROR("Invalid UVD(%d) handle!\n", ip_instance);
+		DRM_ERROR("Invalid UVD handle!\n");
 		return -EINVAL;
 	}
@@ -726,18 +722,19 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
 
 		/* try to alloc a new handle */
 		for (i = 0; i < adev->uvd.max_handles; ++i) {
-			if (atomic_read(&adev->uvd.inst[ip_instance].handles[i]) == handle) {
-				DRM_ERROR("(%d)Handle 0x%x already in use!\n", ip_instance, handle);
+			if (atomic_read(&adev->uvd.handles[i]) == handle) {
+				DRM_ERROR(")Handle 0x%x already in use!\n",
+					  handle);
 				return -EINVAL;
 			}
 
-			if (!atomic_cmpxchg(&adev->uvd.inst[ip_instance].handles[i], 0, handle)) {
-				adev->uvd.inst[ip_instance].filp[i] = ctx->parser->filp;
+			if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
+				adev->uvd.filp[i] = ctx->parser->filp;
 				return 0;
 			}
 		}
 
-		DRM_ERROR("No more free UVD(%d) handles!\n", ip_instance);
+		DRM_ERROR("No more free UVD handles!\n");
 		return -ENOSPC;
 
 	case 1:
@@ -749,27 +746,27 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
 
 		/* validate the handle */
 		for (i = 0; i < adev->uvd.max_handles; ++i) {
-			if (atomic_read(&adev->uvd.inst[ip_instance].handles[i]) == handle) {
-				if (adev->uvd.inst[ip_instance].filp[i] != ctx->parser->filp) {
-					DRM_ERROR("UVD(%d) handle collision detected!\n", ip_instance);
+			if (atomic_read(&adev->uvd.handles[i]) == handle) {
+				if (adev->uvd.filp[i] != ctx->parser->filp) {
+					DRM_ERROR("UVD handle collision detected!\n");
 					return -EINVAL;
 				}
 				return 0;
 			}
 		}
 
-		DRM_ERROR("Invalid UVD(%d) handle 0x%x!\n", ip_instance, handle);
+		DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
 		return -ENOENT;
 
 	case 2:
 		/* it's a destroy msg, free the handle */
 		for (i = 0; i < adev->uvd.max_handles; ++i)
-			atomic_cmpxchg(&adev->uvd.inst[ip_instance].handles[i], handle, 0);
+			atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
 		amdgpu_bo_kunmap(bo);
 		return 0;
 
 	default:
-		DRM_ERROR("Illegal UVD(%d) message type (%d)!\n", ip_instance, msg_type);
+		DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
 		return -EINVAL;
 	}
 	BUG();
@@ -1062,19 +1059,16 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 		if (r < 0)
 			goto err_free;
 
-		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
-		job->fence = dma_fence_get(f);
+		r = amdgpu_job_submit_direct(job, ring, &f);
 		if (r)
 			goto err_free;
-
-		amdgpu_job_free(job);
 	} else {
 		r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
 				     AMDGPU_FENCE_OWNER_UNDEFINED, false);
 		if (r)
 			goto err_free;
 
-		r = amdgpu_job_submit(job, ring, &adev->uvd.inst[ring->me].entity,
+		r = amdgpu_job_submit(job, &adev->uvd.entity,
 				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
 		if (r)
 			goto err_free;
@@ -1276,7 +1270,7 @@ uint32_t amdgpu_uvd_used_handles(struct amdgpu_device *adev)
 		 * necessarily linear. So we need to count
 		 * all non-zero handles.
 		 */
-		if (atomic_read(&adev->uvd.inst->handles[i]))
+		if (atomic_read(&adev->uvd.handles[i]))
 			used_handles++;
 	}
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h

@@ -42,13 +42,9 @@ struct amdgpu_uvd_inst {
 	void			*cpu_addr;
 	uint64_t		gpu_addr;
 	void			*saved_bo;
-	atomic_t		handles[AMDGPU_MAX_UVD_HANDLES];
-	struct drm_file		*filp[AMDGPU_MAX_UVD_HANDLES];
 	struct amdgpu_ring	ring;
 	struct amdgpu_ring	ring_enc[AMDGPU_MAX_UVD_ENC_RINGS];
 	struct amdgpu_irq_src	irq;
-	struct drm_sched_entity entity;
-	struct drm_sched_entity entity_enc;
 	uint32_t		srbm_soft_reset;
 };
 
@@ -57,10 +53,13 @@ struct amdgpu_uvd {
 	unsigned		fw_version;
 	unsigned		max_handles;
 	unsigned		num_enc_rings;
-	uint8_t	num_uvd_inst;
+	uint8_t			num_uvd_inst;
 	bool			address_64_bit;
 	bool			use_ctx_buf;
-	struct amdgpu_uvd_inst		inst[AMDGPU_MAX_UVD_INSTANCES];
+	struct amdgpu_uvd_inst	inst[AMDGPU_MAX_UVD_INSTANCES];
+	struct drm_file		*filp[AMDGPU_MAX_UVD_HANDLES];
+	atomic_t		handles[AMDGPU_MAX_UVD_HANDLES];
+	struct drm_sched_entity entity;
 	struct delayed_work	idle_work;
 };
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c

@@ -190,8 +190,7 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
 
 	ring = &adev->vce.ring[0];
 	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-	r = drm_sched_entity_init(&ring->sched, &adev->vce.entity,
-				  rq, NULL);
+	r = drm_sched_entity_init(&adev->vce.entity, &rq, 1, NULL);
 	if (r != 0) {
 		DRM_ERROR("Failed setting up VCE run queue.\n");
 		return r;
@@ -470,12 +469,10 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 	for (i = ib->length_dw; i < ib_size_dw; ++i)
 		ib->ptr[i] = 0x0;
 
-	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
-	job->fence = dma_fence_get(f);
+	r = amdgpu_job_submit_direct(job, ring, &f);
 	if (r)
 		goto err;
 
-	amdgpu_job_free(job);
 	if (fence)
 		*fence = dma_fence_get(f);
 	dma_fence_put(f);
@@ -532,19 +529,13 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 	for (i = ib->length_dw; i < ib_size_dw; ++i)
 		ib->ptr[i] = 0x0;
 
-	if (direct) {
-		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
-		job->fence = dma_fence_get(f);
-		if (r)
-			goto err;
-
-		amdgpu_job_free(job);
-	} else {
-		r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
+	if (direct)
+		r = amdgpu_job_submit_direct(job, ring, &f);
+	else
+		r = amdgpu_job_submit(job, &ring->adev->vce.entity,
 				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
-		if (r)
-			goto err;
-	}
+	if (r)
+		goto err;
 
 	if (fence)
 		*fence = dma_fence_get(f);
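The open-coded direct-submission pattern (amdgpu_ib_schedule() followed by dma_fence_get() and amdgpu_job_free()) that keeps disappearing in these hunks is collapsed into the new amdgpu_job_submit_direct() helper. Its body is not shown in this excerpt; a sketch reconstructed from the call sites (the scheduler assignment and error path are assumptions):

    int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
                                 struct dma_fence **fence)
    {
            int r;

            job->base.sched = &ring->sched; /* assumed: record the backing scheduler */
            r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, NULL, fence);
            job->fence = dma_fence_get(*fence);
            if (r)
                    return r;

            amdgpu_job_free(job);
            return 0;
    }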
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c

@@ -211,6 +211,8 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
 		fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
 	}
 
+	fences += amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg);
+
 	if (fences == 0) {
 		if (adev->pm.dpm_enabled)
 			amdgpu_dpm_enable_uvd(adev, false);
@@ -227,7 +229,7 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
 	struct amdgpu_device *adev = ring->adev;
 	bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
 
-	if (set_clocks && adev->pm.dpm_enabled) {
+	if (set_clocks) {
 		if (adev->pm.dpm_enabled)
 			amdgpu_dpm_enable_uvd(adev, true);
 		else
@@ -306,13 +308,10 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
 	}
 	ib->length_dw = 16;
 
-	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
-	job->fence = dma_fence_get(f);
+	r = amdgpu_job_submit_direct(job, ring, &f);
 	if (r)
 		goto err_free;
 
-	amdgpu_job_free(job);
-
 	amdgpu_bo_fence(bo, f, false);
 	amdgpu_bo_unreserve(bo);
 	amdgpu_bo_unref(&bo);
@@ -497,12 +496,10 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
 	for (i = ib->length_dw; i < ib_size_dw; ++i)
 		ib->ptr[i] = 0x0;
 
-	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
-	job->fence = dma_fence_get(f);
+	r = amdgpu_job_submit_direct(job, ring, &f);
 	if (r)
 		goto err;
 
-	amdgpu_job_free(job);
 	if (fence)
 		*fence = dma_fence_get(f);
 	dma_fence_put(f);
@@ -551,12 +548,10 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
 	for (i = ib->length_dw; i < ib_size_dw; ++i)
 		ib->ptr[i] = 0x0;
 
-	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
-	job->fence = dma_fence_get(f);
+	r = amdgpu_job_submit_direct(job, ring, &f);
 	if (r)
 		goto err;
 
-	amdgpu_job_free(job);
 	if (fence)
 		*fence = dma_fence_get(f);
 	dma_fence_put(f);
@@ -664,12 +659,10 @@ static int amdgpu_vcn_jpeg_set_reg(struct amdgpu_ring *ring, uint32_t handle,
 	}
 	ib->length_dw = 16;
 
-	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
-	job->fence = dma_fence_get(f);
+	r = amdgpu_job_submit_direct(job, ring, &f);
 	if (r)
 		goto err;
 
-	amdgpu_job_free(job);
 	if (fence)
 		*fence = dma_fence_get(f);
 	dma_fence_put(f);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

@@ -156,6 +156,9 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
 		return;
 	list_add_tail(&base->bo_list, &bo->va);
 
+	if (bo->tbo.type == ttm_bo_type_kernel)
+		list_move(&base->vm_status, &vm->relocated);
+
 	if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
 		return;
@@ -422,8 +425,8 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 	if (r)
 		goto error_free;
 
-	r = amdgpu_job_submit(job, ring, &vm->entity,
-			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
+	r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_UNDEFINED,
+			      &fence);
 	if (r)
 		goto error_free;
@@ -540,7 +543,6 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
 			pt->parent = amdgpu_bo_ref(parent->base.bo);
 
 			amdgpu_vm_bo_base_init(&entry->base, vm, pt);
-			list_move(&entry->base.vm_status, &vm->relocated);
 		}
 
 		if (level < AMDGPU_VM_PTB) {
@@ -1118,8 +1120,8 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
 	amdgpu_sync_resv(adev, &job->sync, root->tbo.resv,
 			 AMDGPU_FENCE_OWNER_VM, false);
 	WARN_ON(params.ib->length_dw > ndw);
-	r = amdgpu_job_submit(job, ring, &vm->entity,
-			      AMDGPU_FENCE_OWNER_VM, &fence);
+	r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM,
+			      &fence);
 	if (r)
 		goto error;
@@ -1483,8 +1485,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 
 	amdgpu_ring_pad_ib(ring, params.ib);
 	WARN_ON(params.ib->length_dw > ndw);
-	r = amdgpu_job_submit(job, ring, &vm->entity,
-			      AMDGPU_FENCE_OWNER_VM, &f);
+	r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM, &f);
 	if (r)
 		goto error_free;
@@ -1645,18 +1646,17 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 	uint64_t flags;
 	int r;
 
-	if (clear || !bo_va->base.bo) {
+	if (clear || !bo) {
 		mem = NULL;
 		nodes = NULL;
 		exclusive = NULL;
 	} else {
 		struct ttm_dma_tt *ttm;
 
-		mem = &bo_va->base.bo->tbo.mem;
+		mem = &bo->tbo.mem;
 		nodes = mem->mm_node;
 		if (mem->mem_type == TTM_PL_TT) {
-			ttm = container_of(bo_va->base.bo->tbo.ttm,
-					   struct ttm_dma_tt, ttm);
+			ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
 			pages_addr = ttm->dma_address;
 		}
 		exclusive = reservation_object_get_excl(bo->tbo.resv);
@@ -2562,8 +2562,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	ring_instance %= adev->vm_manager.vm_pte_num_rings;
 	ring = adev->vm_manager.vm_pte_rings[ring_instance];
 	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
-	r = drm_sched_entity_init(&ring->sched, &vm->entity,
-				  rq, NULL);
+	r = drm_sched_entity_init(&vm->entity, &rq, 1, NULL);
 	if (r)
 		return r;
@@ -2942,3 +2941,42 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 
 	return 0;
 }
+
+/**
+ * amdgpu_vm_get_task_info - Extracts task info for a PASID.
+ *
+ * @dev: drm device pointer
+ * @pasid: PASID identifier for VM
+ * @task_info: task_info to fill.
+ */
+void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
+			 struct amdgpu_task_info *task_info)
+{
+	struct amdgpu_vm *vm;
+
+	spin_lock(&adev->vm_manager.pasid_lock);
+
+	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
+	if (vm)
+		*task_info = vm->task_info;
+
+	spin_unlock(&adev->vm_manager.pasid_lock);
+}
+
+/**
+ * amdgpu_vm_set_task_info - Sets VMs task info.
+ *
+ * @vm: vm for which to set the info
+ */
+void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
+{
+	if (!vm->task_info.pid) {
+		vm->task_info.pid = current->pid;
+		get_task_comm(vm->task_info.task_name, current);
+
+		if (current->group_leader->mm == current->mm) {
+			vm->task_info.tgid = current->group_leader->pid;
+			get_task_comm(vm->task_info.process_name, current->group_leader);
+		}
+	}
+}
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h

@@ -164,6 +164,14 @@ struct amdgpu_vm_pt {
 #define AMDGPU_VM_FAULT_PASID(fault) ((u64)(fault) >> 48)
 #define AMDGPU_VM_FAULT_ADDR(fault)  ((u64)(fault) & 0xfffffffff000ULL)
 
+
+struct amdgpu_task_info {
+	char	process_name[TASK_COMM_LEN];
+	char	task_name[TASK_COMM_LEN];
+	pid_t	pid;
+	pid_t	tgid;
+};
+
 struct amdgpu_vm {
 	/* tree of virtual addresses mapped */
 	struct rb_root_cached	va;
@@ -215,6 +223,9 @@ struct amdgpu_vm {
 
 	/* Valid while the PD is reserved or fenced */
 	uint64_t		pd_phys_addr;
+
+	/* Some basic info about the task */
+	struct amdgpu_task_info task_info;
 };
 
 struct amdgpu_vm_manager {
@@ -317,4 +328,9 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
 			      struct amdgpu_job *job);
 void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev);
 
+void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
+			 struct amdgpu_task_info *task_info);
+
+void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);
+
 #endif
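The new amdgpu_task_info plumbing gives interrupt handlers a way to attribute a GPU fault to a process. Minimal usage, mirroring the gmc fault handlers later in this merge (the message text here is illustrative; the exact format strings are in the gmc_v8_0/gmc_v9_0 hunks below):

    struct amdgpu_task_info task_info = { 0 };

    amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
    dev_err(adev->dev, "GPU fault from process %s (tgid %d), thread %s (pid %d)\n",
            task_info.process_name, task_info.tgid,
            task_info.task_name, task_info.pid);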
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c

@@ -97,33 +97,29 @@ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
 }
 
 /**
- * amdgpu_vram_mgr_bo_invisible_size - CPU invisible BO size
+ * amdgpu_vram_mgr_bo_visible_size - CPU visible BO size
  *
  * @bo: &amdgpu_bo buffer object (must be in VRAM)
  *
  * Returns:
- * How much of the given &amdgpu_bo buffer object lies in CPU invisible VRAM.
+ * How much of the given &amdgpu_bo buffer object lies in CPU visible VRAM.
  */
-u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo)
+u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	struct ttm_mem_reg *mem = &bo->tbo.mem;
 	struct drm_mm_node *nodes = mem->mm_node;
 	unsigned pages = mem->num_pages;
-	u64 usage = 0;
+	u64 usage;
 
 	if (amdgpu_gmc_vram_full_visible(&adev->gmc))
-		return 0;
+		return amdgpu_bo_size(bo);
 
 	if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
-		return amdgpu_bo_size(bo);
-
-	while (nodes && pages) {
-		usage += nodes->size << PAGE_SHIFT;
-		usage -= amdgpu_vram_mgr_vis_size(adev, nodes);
-		pages -= nodes->size;
-		++nodes;
-	}
+		return 0;
+
+	for (usage = 0; nodes && pages; pages -= nodes->size, nodes++)
+		usage += amdgpu_vram_mgr_vis_size(adev, nodes);
 
 	return usage;
 }
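Since the helper now reports the CPU-visible size instead of the invisible size, a caller that still wants the invisible portion can derive it by subtraction; a hedged sketch of that conversion:

    /* before: */
    u64 invisible = amdgpu_vram_mgr_bo_invisible_size(bo);
    /* after, equivalently: */
    u64 invisible = amdgpu_bo_size(bo) - amdgpu_vram_mgr_bo_visible_size(bo);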
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c

@@ -41,6 +41,8 @@
 #include "gmc/gmc_8_1_d.h"
 #include "gmc/gmc_8_1_sh_mask.h"
 
+#include "ivsrcid/ivsrcid_vislands30.h"
+
 static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev);
 static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -1855,15 +1857,14 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
 	if (unlikely(r != 0))
 		return r;
 
-	if (atomic) {
-		fb_location = amdgpu_bo_gpu_offset(abo);
-	} else {
-		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+	if (!atomic) {
+		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
 		if (unlikely(r != 0)) {
 			amdgpu_bo_unreserve(abo);
 			return -EINVAL;
 		}
 	}
+	fb_location = amdgpu_bo_gpu_offset(abo);
 
 	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
 	amdgpu_bo_unreserve(abo);
@@ -2370,13 +2371,14 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
 		return ret;
 	}
 
-	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
+	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
 	amdgpu_bo_unreserve(aobj);
 	if (ret) {
 		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
 		drm_gem_object_put_unlocked(obj);
 		return ret;
 	}
+	amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
 
 	dce_v10_0_lock_cursor(crtc, true);
@@ -2737,14 +2739,14 @@ static int dce_v10_0_sw_init(void *handle)
 		return r;
 	}
 
-	for (i = 8; i < 20; i += 2) {
+	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; i < 20; i += 2) {
 		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
 		if (r)
 			return r;
 	}
 
 	/* HPD hotplug */
-	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 42, &adev->hpd_irq);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
 	if (r)
 		return r;
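amdgpu_bo_pin() no longer returns the GPU address through an out parameter; callers pin first and then query the offset explicitly. The same two-step idiom recurs in every DCE variant below (sketch):

    r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
    if (unlikely(r != 0)) {
            amdgpu_bo_unreserve(abo);
            return -EINVAL;
    }
    fb_location = amdgpu_bo_gpu_offset(abo);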
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c

@@ -41,6 +41,8 @@
 #include "gmc/gmc_8_1_d.h"
 #include "gmc/gmc_8_1_sh_mask.h"
 
+#include "ivsrcid/ivsrcid_vislands30.h"
+
 static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev);
 static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -1897,15 +1899,14 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
 	if (unlikely(r != 0))
 		return r;
 
-	if (atomic) {
-		fb_location = amdgpu_bo_gpu_offset(abo);
-	} else {
-		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+	if (!atomic) {
+		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
 		if (unlikely(r != 0)) {
 			amdgpu_bo_unreserve(abo);
 			return -EINVAL;
 		}
 	}
+	fb_location = amdgpu_bo_gpu_offset(abo);
 
 	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
 	amdgpu_bo_unreserve(abo);
@@ -2449,13 +2450,14 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
 		return ret;
 	}
 
-	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
+	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
 	amdgpu_bo_unreserve(aobj);
 	if (ret) {
 		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
 		drm_gem_object_put_unlocked(obj);
 		return ret;
 	}
+	amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
 
 	dce_v11_0_lock_cursor(crtc, true);
@@ -2858,14 +2860,14 @@ static int dce_v11_0_sw_init(void *handle)
 		return r;
 	}
 
-	for (i = 8; i < 20; i += 2) {
+	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; i < 20; i += 2) {
 		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
 		if (r)
 			return r;
 	}
 
 	/* HPD hotplug */
-	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 42, &adev->hpd_irq);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
 	if (r)
 		return r;
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c

@@ -1811,15 +1811,14 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
 	if (unlikely(r != 0))
 		return r;
 
-	if (atomic) {
-		fb_location = amdgpu_bo_gpu_offset(abo);
-	} else {
-		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+	if (!atomic) {
+		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
 		if (unlikely(r != 0)) {
 			amdgpu_bo_unreserve(abo);
 			return -EINVAL;
 		}
 	}
+	fb_location = amdgpu_bo_gpu_offset(abo);
 
 	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
 	amdgpu_bo_unreserve(abo);
@@ -2263,13 +2262,14 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
 		return ret;
 	}
 
-	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
+	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
 	amdgpu_bo_unreserve(aobj);
 	if (ret) {
 		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
 		drm_gem_object_put_unlocked(obj);
 		return ret;
 	}
+	amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
 
 	dce_v6_0_lock_cursor(crtc, true);
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c

@@ -1786,15 +1786,14 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
 	if (unlikely(r != 0))
 		return r;
 
-	if (atomic) {
-		fb_location = amdgpu_bo_gpu_offset(abo);
-	} else {
-		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+	if (!atomic) {
+		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
 		if (unlikely(r != 0)) {
 			amdgpu_bo_unreserve(abo);
 			return -EINVAL;
 		}
 	}
+	fb_location = amdgpu_bo_gpu_offset(abo);
 
 	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
 	amdgpu_bo_unreserve(abo);
@@ -2274,13 +2273,14 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
 		return ret;
 	}
 
-	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
+	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
 	amdgpu_bo_unreserve(aobj);
 	if (ret) {
 		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
 		drm_gem_object_put_unlocked(obj);
 		return ret;
 	}
+	amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
 
 	dce_v8_0_lock_cursor(crtc, true);
drivers/gpu/drm/amd/amdgpu/dce_virtual.c

@@ -36,6 +36,7 @@
 #include "dce_v10_0.h"
 #include "dce_v11_0.h"
 #include "dce_virtual.h"
+#include "ivsrcid/ivsrcid_vislands30.h"
 
 #define DCE_VIRTUAL_VBLANK_PERIOD 16666666
 
@@ -371,7 +372,7 @@ static int dce_virtual_sw_init(void *handle)
 	int r, i;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 229, &adev->crtc_irq);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SMU_DISP_TIMER2_TRIGGER, &adev->crtc_irq);
 	if (r)
 		return r;
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c

@@ -51,6 +51,8 @@
 
 #include "smu/smu_7_1_3_d.h"
 
+#include "ivsrcid/ivsrcid_vislands30.h"
+
 #define GFX8_NUM_GFX_RINGS     1
 #define GFX8_MEC_HPD_SIZE 2048
 
@@ -2047,35 +2049,35 @@ static int gfx_v8_0_sw_init(void *handle)
 	adev->gfx.mec.num_queue_per_pipe = 8;
 
 	/* KIQ event */
-	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 178, &adev->gfx.kiq.irq);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_INT_IB2, &adev->gfx.kiq.irq);
 	if (r)
 		return r;
 
 	/* EOP Event */
-	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_END_OF_PIPE, &adev->gfx.eop_irq);
 	if (r)
 		return r;
 
 	/* Privileged reg */
-	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 184,
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_REG_FAULT,
			      &adev->gfx.priv_reg_irq);
 	if (r)
 		return r;
 
 	/* Privileged inst */
-	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 185,
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_INSTR_FAULT,
			      &adev->gfx.priv_inst_irq);
 	if (r)
 		return r;
 
 	/* Add CP EDC/ECC irq  */
-	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 197,
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_ECC_ERROR,
			      &adev->gfx.cp_ecc_error_irq);
 	if (r)
 		return r;
 
 	/* SQ interrupts. */
-	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 239,
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SQ_INTERRUPT_MSG,
			      &adev->gfx.sq_irq);
 	if (r) {
 		DRM_ERROR("amdgpu_irq_add() for SQ failed: %d\n", r);
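The interrupt registrations above swap magic numbers for named source IDs; the numeric values are unchanged, so each -/+ pair doubles as documentation of the mapping. Read off the hunks above, the vislands30 defines correspond to (values per the old literals; the header itself is not shown in this excerpt):

    #define VISLANDS30_IV_SRCID_CP_INT_IB2            178
    #define VISLANDS30_IV_SRCID_CP_END_OF_PIPE        181
    #define VISLANDS30_IV_SRCID_CP_PRIV_REG_FAULT     184
    #define VISLANDS30_IV_SRCID_CP_PRIV_INSTR_FAULT   185
    #define VISLANDS30_IV_SRCID_CP_ECC_ERROR          197
    #define VISLANDS30_IV_SRCID_SQ_INTERRUPT_MSG      239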
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c

@@ -38,6 +38,8 @@
 #include "clearstate_gfx9.h"
 #include "v9_structs.h"
 
+#include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"
+
 #define GFX9_NUM_GFX_RINGS     1
 #define GFX9_MEC_HPD_SIZE 2048
 #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
@@ -102,11 +104,22 @@ static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
 {
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0x0000f000, 0x00012107),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x2a114042),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00008000, 0x00048000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800)
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
 };
 
 static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
@@ -648,7 +661,10 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
 		adev->firmware.fw_size +=
 			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
 
-		if (adev->gfx.rlc.is_rlc_v2_1) {
+		if (adev->gfx.rlc.is_rlc_v2_1 &&
+		    adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
+		    adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
+		    adev->gfx.rlc.save_restore_list_srm_size_bytes) {
 			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
 			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
 			info->fw = adev->gfx.rlc_fw;
@@ -943,6 +959,7 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
 		dst_ptr = adev->gfx.rlc.cs_ptr;
 		gfx_v9_0_get_csb_buffer(adev, dst_ptr);
 		amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
+		amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
 		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
 	}
 
@@ -971,6 +988,39 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
 	return 0;
 }
 
+static int gfx_v9_0_csb_vram_pin(struct amdgpu_device *adev)
+{
+	int r;
+
+	r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
+	if (unlikely(r != 0))
+		return r;
+
+	r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj,
+			AMDGPU_GEM_DOMAIN_VRAM);
+	if (!r)
+		adev->gfx.rlc.clear_state_gpu_addr =
+			amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj);
+
+	amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+
+	return r;
+}
+
+static void gfx_v9_0_csb_vram_unpin(struct amdgpu_device *adev)
+{
+	int r;
+
+	if (!adev->gfx.rlc.clear_state_obj)
+		return;
+
+	r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
+	if (likely(r == 0)) {
+		amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
+		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+	}
+}
+
 static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
 {
 	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
@@ -1451,23 +1501,23 @@ static int gfx_v9_0_sw_init(void *handle)
 	adev->gfx.mec.num_queue_per_pipe = 8;
 
 	/* KIQ event */
-	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 178, &adev->gfx.kiq.irq);
+	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_IB2_INTERRUPT_PKT, &adev->gfx.kiq.irq);
 	if (r)
 		return r;
 
 	/* EOP Event */
-	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 181, &adev->gfx.eop_irq);
+	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
 	if (r)
 		return r;
 
 	/* Privileged reg */
-	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 184,
+	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
			      &adev->gfx.priv_reg_irq);
 	if (r)
 		return r;
 
 	/* Privileged inst */
-	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 185,
+	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT,
			      &adev->gfx.priv_inst_irq);
 	if (r)
 		return r;
@@ -2148,8 +2198,16 @@ static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *ad
 
 static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
 {
-	if (!adev->gfx.rlc.is_rlc_v2_1)
-		return;
+	gfx_v9_0_init_csb(adev);
+
+	/*
+	 * Rlc save restore list is workable since v2_1.
+	 * And it's needed by gfxoff feature.
+	 */
+	if (adev->gfx.rlc.is_rlc_v2_1) {
+		gfx_v9_1_init_rlc_save_restore_list(adev);
+		gfx_v9_0_enable_save_restore_machine(adev);
+	}
 
 	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
			      AMD_PG_SUPPORT_GFX_SMG |
@@ -2157,10 +2215,6 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
			      AMD_PG_SUPPORT_CP |
			      AMD_PG_SUPPORT_GDS |
			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
-		gfx_v9_0_init_csb(adev);
-		gfx_v9_1_init_rlc_save_restore_list(adev);
-		gfx_v9_0_enable_save_restore_machine(adev);
-
 		WREG32(mmRLC_JUMP_TABLE_RESTORE,
		       adev->gfx.rlc.cp_table_gpu_addr >> 8);
 		gfx_v9_0_init_gfx_power_gating(adev);
@@ -2252,9 +2306,6 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
 	/* disable CG */
 	WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
 
-	/* disable PG */
-	WREG32_SOC15(GC, 0, mmRLC_PG_CNTL, 0);
-
 	gfx_v9_0_rlc_reset(adev);
 
 	gfx_v9_0_init_pg(adev);
@@ -3116,6 +3167,10 @@ static int gfx_v9_0_hw_init(void *handle)
 
 	gfx_v9_0_gpu_init(adev);
 
+	r = gfx_v9_0_csb_vram_pin(adev);
+	if (r)
+		return r;
+
 	r = gfx_v9_0_rlc_resume(adev);
 	if (r)
 		return r;
@@ -3224,6 +3279,8 @@ static int gfx_v9_0_hw_fini(void *handle)
 	gfx_v9_0_cp_enable(adev, false);
 	gfx_v9_0_rlc_stop(adev);
 
+	gfx_v9_0_csb_vram_unpin(adev);
+
 	return 0;
 }
 
@@ -3510,8 +3567,11 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
 		/* 1 - RLC_CGTT_MGCG_OVERRIDE */
 		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
-		data &= ~(RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK |
-			  RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
+
+		if (adev->asic_type != CHIP_VEGA12)
+			data &= ~RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
+
+		data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
			  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
			  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
 
@@ -3541,11 +3601,15 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
 	} else {
 		/* 1 - MGCG_OVERRIDE */
 		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
-		data |= (RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK |
-			 RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
+
+		if (adev->asic_type != CHIP_VEGA12)
+			data |= RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
+
+		data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
			 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
			 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
			 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
 
 		if (def != data)
 			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
@@ -3581,9 +3645,11 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
 		/* update CGCG and CGLS override bits */
 		if (def != data)
 			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
-		/* enable 3Dcgcg FSM(0x0020003f) */
+
+		/* enable 3Dcgcg FSM(0x0000363f) */
 		def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
-		data = (0x2000 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
+
+		data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
			RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
 			data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
@@ -3630,9 +3696,10 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
 		if (def != data)
 			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
 
-		/* enable cgcg FSM(0x0020003F) */
+		/* enable cgcg FSM(0x0000363F) */
 		def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
-		data = (0x2000 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
+
+		data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
			RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
 			data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
@@ -3715,6 +3782,11 @@ static int gfx_v9_0_set_powergating_state(void *handle,
 		/* update mgcg state */
 		gfx_v9_0_update_gfx_mg_power_gating(adev, enable);
 
+		/* set gfx off through smu */
+		if (enable && adev->powerplay.pp_funcs->set_powergating_by_smu)
+			amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true);
+		break;
+	case CHIP_VEGA12:
 		/* set gfx off through smu */
 		if (enable && adev->powerplay.pp_funcs->set_powergating_by_smu)
 			amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true);
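The new gfx_v9_0_csb_vram_pin()/_unpin() pair wraps the standard reserve/pin/offset/unreserve sequence around hw_init/hw_fini so the clear-state buffer keeps a stable VRAM address while the RLC is live. The core idiom, condensed to a sketch:

    r = amdgpu_bo_reserve(bo, false);
    if (unlikely(r != 0))
            return r;
    r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM);
    if (!r)
            gpu_addr = amdgpu_bo_gpu_offset(bo);
    amdgpu_bo_unreserve(bo);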
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c

@@ -43,6 +43,8 @@
 
 #include "amdgpu_atombios.h"
 
+#include "ivsrcid/ivsrcid_vislands30.h"
+
 static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev);
 static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
 static int gmc_v7_0_wait_for_idle(void *handle);
@@ -996,11 +998,11 @@ static int gmc_v7_0_sw_init(void *handle)
 		adev->gmc.vram_type = gmc_v7_0_convert_vram_type(tmp);
 	}
 
-	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault);
 	if (r)
 		return r;
 
-	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault);
 	if (r)
 		return r;
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c

@@ -44,6 +44,7 @@
 
 #include "amdgpu_atombios.h"
 
+#include "ivsrcid/ivsrcid_vislands30.h"
 
 static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev);
 static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -1101,11 +1102,11 @@ static int gmc_v8_0_sw_init(void *handle)
 		adev->gmc.vram_type = gmc_v8_0_convert_vram_type(tmp);
 	}
 
-	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault);
 	if (r)
 		return r;
 
-	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault);
 	if (r)
 		return r;
@@ -1447,8 +1448,13 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
 		gmc_v8_0_set_fault_enable_default(adev, false);
 
 	if (printk_ratelimit()) {
-		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
-			entry->src_id, entry->src_data[0]);
+		struct amdgpu_task_info task_info = { 0 };
+
+		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
+
+		dev_err(adev->dev, "GPU fault detected: %d 0x%08x for process %s pid %d thread %s pid %d\n",
+			entry->src_id, entry->src_data[0], task_info.process_name,
+			task_info.tgid, task_info.task_name, task_info.pid);
 		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
 			addr);
 		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c

@@ -43,6 +43,8 @@
 #include "gfxhub_v1_0.h"
 #include "mmhub_v1_0.h"
 
+#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
+
 /* add these here since we already include dce12 headers and these are for DCN */
 #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION		0x055d
 #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX	2
@@ -257,11 +259,16 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
 	}
 
 	if (printk_ratelimit()) {
+		struct amdgpu_task_info task_info = { 0 };
+
+		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
+
 		dev_err(adev->dev,
-			"[%s] VMC page fault (src_id:%u ring:%u vmid:%u pasid:%u)\n",
+			"[%s] VMC page fault (src_id:%u ring:%u vmid:%u pasid:%u, for process %s pid %d thread %s pid %d\n)\n",
 			entry->vmid_src ? "mmhub" : "gfxhub",
 			entry->src_id, entry->ring_id, entry->vmid,
-			entry->pasid);
+			entry->pasid, task_info.process_name, task_info.tgid,
+			task_info.task_name, task_info.pid);
 		dev_err(adev->dev, "  at page 0x%016llx from %d\n",
 			addr, entry->client_id);
 		if (!amdgpu_sriov_vf(adev))
@@ -872,9 +879,9 @@ static int gmc_v9_0_sw_init(void *handle)
 	}
 
 	/* This interrupt is VMC page fault.*/
-	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, 0,
+	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
				&adev->gmc.vm_fault);
-	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, 0,
+	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
				&adev->gmc.vm_fault);
 
 	if (r)
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c

@@ -44,6 +44,8 @@
 
 #include "iceland_sdma_pkt_open.h"
 
+#include "ivsrcid/ivsrcid_vislands30.h"
+
 static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev);
 static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev);
 static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev);
@@ -896,7 +898,7 @@ static int sdma_v2_4_sw_init(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	/* SDMA trap event */
-	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 224,
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP,
			      &adev->sdma.trap_irq);
 	if (r)
 		return r;
@@ -908,7 +910,7 @@ static int sdma_v2_4_sw_init(void *handle)
 		return r;
 
 	/* SDMA Privileged inst */
-	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 247,
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE,
			      &adev->sdma.illegal_inst_irq);
 	if (r)
 		return r;
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c

@@ -44,6 +44,8 @@
 
 #include "tonga_sdma_pkt_open.h"
 
+#include "ivsrcid/ivsrcid_vislands30.h"
+
 static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev);
 static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev);
 static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev);
@@ -1175,7 +1177,7 @@ static int sdma_v3_0_sw_init(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	/* SDMA trap event */
-	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 224,
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP,
			      &adev->sdma.trap_irq);
 	if (r)
 		return r;
@@ -1187,7 +1189,7 @@ static int sdma_v3_0_sw_init(void *handle)
 		return r;
 
 	/* SDMA Privileged inst */
-	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 247,
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE,
			      &adev->sdma.illegal_inst_irq);
 	if (r)
 		return r;
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c

@@ -38,6 +38,9 @@
 #include "soc15.h"
 #include "vega10_sdma_pkt_open.h"
 
+#include "ivsrcid/sdma0/irqsrcs_sdma0_4_0.h"
+#include "ivsrcid/sdma1/irqsrcs_sdma1_4_0.h"
+
 MODULE_FIRMWARE("amdgpu/vega10_sdma.bin");
 MODULE_FIRMWARE("amdgpu/vega10_sdma1.bin");
 MODULE_FIRMWARE("amdgpu/vega12_sdma.bin");
@@ -1225,13 +1228,13 @@ static int sdma_v4_0_sw_init(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	/* SDMA trap event */
-	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA0, 224,
+	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA0, SDMA0_4_0__SRCID__SDMA_TRAP,
			      &adev->sdma.trap_irq);
 	if (r)
 		return r;
 
 	/* SDMA trap event */
-	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA1, 224,
+	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA1, SDMA1_4_0__SRCID__SDMA_TRAP,
			      &adev->sdma.trap_irq);
 	if (r)
 		return r;
drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c

@@ -35,6 +35,7 @@
 #include "vi.h"
 #include "smu/smu_7_1_2_d.h"
 #include "smu/smu_7_1_2_sh_mask.h"
+#include "ivsrcid/ivsrcid_vislands30.h"
 
 static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
 static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -104,7 +105,7 @@ static int uvd_v5_0_sw_init(void *handle)
 	int r;
 
 	/* UVD TRAP */
-	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq);
 	if (r)
 		return r;
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c

@@ -36,6 +36,7 @@
 #include "bif/bif_5_1_d.h"
 #include "gmc/gmc_8_1_d.h"
 #include "vi.h"
+#include "ivsrcid/ivsrcid_vislands30.h"
 
 /* Polaris10/11/12 firmware version */
 #define FW_1_130_16 ((1 << 24) | (130 << 16) | (16 << 8))
@@ -247,12 +248,10 @@ static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
 	for (i = ib->length_dw; i < ib_size_dw; ++i)
 		ib->ptr[i] = 0x0;
 
-	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
-	job->fence = dma_fence_get(f);
+	r = amdgpu_job_submit_direct(job, ring, &f);
 	if (r)
 		goto err;
 
-	amdgpu_job_free(job);
 	if (fence)
 		*fence = dma_fence_get(f);
 	dma_fence_put(f);
@@ -311,19 +310,13 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
 	for (i = ib->length_dw; i < ib_size_dw; ++i)
 		ib->ptr[i] = 0x0;
 
-	if (direct) {
-		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
-		job->fence = dma_fence_get(f);
-		if (r)
-			goto err;
-
-		amdgpu_job_free(job);
-	} else {
-		r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
+	if (direct)
+		r = amdgpu_job_submit_direct(job, ring, &f);
+	else
+		r = amdgpu_job_submit(job, &ring->adev->vce.entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
-		if (r)
-			goto err;
-	}
+	if (r)
+		goto err;
 
 	if (fence)
 		*fence = dma_fence_get(f);
@@ -400,14 +393,14 @@ static int uvd_v6_0_sw_init(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	/* UVD TRAP */
-	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq);
 	if (r)
 		return r;
 
 	/* UVD ENC TRAP */
 	if (uvd_v6_0_enc_support(adev)) {
 		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
-			r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 119, &adev->uvd.inst->irq);
+			r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + VISLANDS30_IV_SRCID_UVD_ENC_GEN_PURP, &adev->uvd.inst->irq);
 			if (r)
 				return r;
 		}
@@ -425,16 +418,6 @@ static int uvd_v6_0_sw_init(void *handle)
 		adev->uvd.num_enc_rings = 0;
 
 		DRM_INFO("UVD ENC is disabled\n");
-	} else {
-		struct drm_sched_rq *rq;
-		ring = &adev->uvd.inst->ring_enc[0];
-		rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-		r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst->entity_enc,
-					  rq, NULL);
-		if (r) {
-			DRM_ERROR("Failed setting up UVD ENC run queue.\n");
-			return r;
-		}
 	}
 
 	r = amdgpu_uvd_resume(adev);
@@ -470,8 +453,6 @@ static int uvd_v6_0_sw_fini(void *handle)
 		return r;
 
 	if (uvd_v6_0_enc_support(adev)) {
-		drm_sched_entity_destroy(&adev->uvd.inst->ring_enc[0].sched, &adev->uvd.inst->entity_enc);
-
 		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
 			amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]);
 	}
drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -39,6 +39,7 @@
#include "hdp/hdp_4_0_offset.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "mmhub/mmhub_1_0_sh_mask.h"
#include "ivsrcid/uvd/irqsrcs_uvd_7_0.h"

#define UVD7_MAX_HW_INSTANCES_VEGA20 2

@@ -249,12 +250,10 @@ static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
	job->fence = dma_fence_get(f);
	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	amdgpu_job_free(job);
	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

@@ -312,19 +311,13 @@ int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	if (direct) {
		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
		job->fence = dma_fence_get(f);
		if (r)
			goto err;

		amdgpu_job_free(job);
	} else {
		r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
	if (direct)
		r = amdgpu_job_submit_direct(job, ring, &f);
	else
		r = amdgpu_job_submit(job, &ring->adev->vce.entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
		if (r)
			goto err;
	}
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);

@@ -396,19 +389,18 @@ static int uvd_v7_0_early_init(void *handle)
static int uvd_v7_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	struct drm_sched_rq *rq;
	int i, j, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		/* UVD TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], 124, &adev->uvd.inst[j].irq);
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], UVD_7_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->uvd.inst[j].irq);
		if (r)
			return r;

		/* UVD ENC TRAP */
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], i + 119, &adev->uvd.inst[j].irq);
			r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], i + UVD_7_0__SRCID__UVD_ENC_GEN_PURP, &adev->uvd.inst[j].irq);
			if (r)
				return r;
		}

@@ -428,17 +420,6 @@ static int uvd_v7_0_sw_init(void *handle)
		DRM_INFO("PSP loading UVD firmware\n");
	}

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		ring = &adev->uvd.inst[j].ring_enc[0];
		rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
		r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity_enc,
					  rq, NULL);
		if (r) {
			DRM_ERROR("(%d)Failed setting up UVD ENC run queue.\n", j);
			return r;
		}
	}

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

@@ -491,8 +472,6 @@ static int uvd_v7_0_sw_fini(void *handle)
		return r;

	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
		drm_sched_entity_destroy(&adev->uvd.inst[j].ring_enc[0].sched, &adev->uvd.inst[j].entity_enc);

		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
			amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
	}
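The UVD hunks above fold the open-coded direct-submission sequence into the new amdgpu_job_submit_direct() helper that this merge introduces. As a rough sketch, reconstructed only from the removed lines above and not copied from the helper's actual body, such a helper would look something like:

	/* Sketch: a direct-submit helper consistent with the removed sequence
	 * above (amdgpu_ib_schedule + fence ref + amdgpu_job_free). The name
	 * has a _sketch suffix because this is a reconstruction for
	 * illustration, not the driver's real implementation. */
	static int amdgpu_job_submit_direct_sketch(struct amdgpu_job *job,
						   struct amdgpu_ring *ring,
						   struct dma_fence **fence)
	{
		int r;

		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, NULL, fence);
		job->fence = dma_fence_get(*fence);	/* mirrors the removed lines */
		if (r)
			return r;

		amdgpu_job_free(job);
		return 0;
	}

The net effect of the rework is visible in the destroy_msg hunk: the caller shrinks to a two-way submit choice plus one shared error check.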
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -39,6 +39,7 @@
#include "smu/smu_7_1_2_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"
#include "ivsrcid/ivsrcid_vislands30.h"

#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04

@@ -422,7 +423,7 @@ static int vce_v3_0_sw_init(void *handle)
	int r, i;

	/* VCE */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 167, &adev->vce.irq);
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_VCE_TRAP, &adev->vce.irq);
	if (r)
		return r;
drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -39,6 +39,8 @@
#include "mmhub/mmhub_1_0_offset.h"
#include "mmhub/mmhub_1_0_sh_mask.h"

#include "ivsrcid/vce/irqsrcs_vce_4_0.h"

#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02

#define VCE_V4_0_FW_SIZE (384 * 1024)
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -35,6 +35,8 @@
#include "mmhub/mmhub_9_1_offset.h"
#include "mmhub/mmhub_9_1_sh_mask.h"

#include "ivsrcid/vcn/irqsrcs_vcn_1_0.h"

static int vcn_v1_0_stop(struct amdgpu_device *adev);
static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);

@@ -77,13 +79,13 @@ static int vcn_v1_0_sw_init(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* VCN DEC TRAP */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 124, &adev->vcn.irq);
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, VCN_1_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.irq);
	if (r)
		return r;

	/* VCN ENC TRAP */
	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, i + 119,
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, i + VCN_1_0__SRCID__UVD_ENC_GENERAL_PURPOSE,
				      &adev->vcn.irq);
		if (r)
			return r;

@@ -600,12 +602,12 @@ static int vcn_v1_0_start(struct amdgpu_device *adev)
	/* disable byte swapping */
	lmi_swap_cntl = 0;

	vcn_v1_0_mc_resume(adev);

	vcn_1_0_disable_static_power_gating(adev);
	/* disable clock gating */
	vcn_v1_0_disable_clock_gating(adev);

	vcn_v1_0_mc_resume(adev);

	/* disable interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
		~UVD_MASTINT_EN__VCPU_EN_MASK);
drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
@@ -51,6 +51,7 @@ int vega10_reg_base_init(struct amdgpu_device *adev)
		adev->reg_offset[PWR_HWIP][i] = (uint32_t *)(&(PWR_BASE.instance[i]));
		adev->reg_offset[NBIF_HWIP][i] = (uint32_t *)(&(NBIF_BASE.instance[i]));
		adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
		adev->reg_offset[CLK_HWIP][i] = (uint32_t *)(&(CLK_BASE.instance[i]));
	}
	return 0;
}
drivers/gpu/drm/amd/amdgpu/vi.c
@@ -1363,11 +1363,11 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,

	if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) {
			pp_support_state = AMD_CG_SUPPORT_MC_LS;
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG) {
			pp_support_state |= AMD_CG_SUPPORT_MC_MGCG;
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)

@@ -1382,11 +1382,11 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,

	if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS) {
			pp_support_state = AMD_CG_SUPPORT_SDMA_LS;
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG) {
			pp_support_state |= AMD_CG_SUPPORT_SDMA_MGCG;
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)

@@ -1401,11 +1401,11 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,

	if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
			pp_support_state = AMD_CG_SUPPORT_HDP_LS;
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG) {
			pp_support_state |= AMD_CG_SUPPORT_HDP_MGCG;
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
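All three vi.c hunks apply one fix: the code used to echo the AMD_CG_SUPPORT_* feature bits back to the SMU, where it should report PP_STATE_SUPPORT_* values. A standalone sketch of the corrected accumulation, with enum values invented purely for illustration (only the names mirror the driver):

	#include <stdint.h>
	#include <stdio.h>

	enum { AMD_CG_SUPPORT_MC_LS = 0x1, AMD_CG_SUPPORT_MC_MGCG = 0x2 }; /* assumed values */
	enum { PP_STATE_LS = 0x1, PP_STATE_CG = 0x2 };                     /* assumed values */
	enum { PP_STATE_SUPPORT_LS = 0x1, PP_STATE_SUPPORT_CG = 0x2 };     /* assumed values */

	/* Build the (support, state) pair for one IP block from cg_flags,
	 * the way the corrected MC/SDMA/HDP branches above do it. */
	static void build_pp_state(uint32_t cg_flags,
				   uint32_t *pp_support_state, uint32_t *pp_state)
	{
		*pp_support_state = 0;
		*pp_state = 0;

		if (cg_flags & AMD_CG_SUPPORT_MC_LS) {
			*pp_support_state = PP_STATE_SUPPORT_LS;
			*pp_state = PP_STATE_LS;
		}
		if (cg_flags & AMD_CG_SUPPORT_MC_MGCG) {
			*pp_support_state |= PP_STATE_SUPPORT_CG;
			*pp_state |= PP_STATE_CG;
		}
	}

	int main(void)
	{
		uint32_t support, state;

		build_pp_state(AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG,
			       &support, &state);
		printf("support=0x%x state=0x%x\n", support, state);
		return 0;
	}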
drivers/gpu/drm/amd/display/Kconfig
@@ -9,14 +9,6 @@ config DRM_AMD_DC
	  support for AMDGPU. This adds required support for Vega and
	  Raven ASICs.

config DRM_AMD_DC_DCN1_0
	bool "DCN 1.0 Raven family"
	depends on DRM_AMD_DC && X86
	default y
	help
	  Choose this option if you want to have
	  RV family for display engine

config DEBUG_KERNEL_DC
	bool "Enable kgdb break in DC"
	depends on DRM_AMD_DC
drivers/gpu/drm/amd/display/TODO
@@ -97,10 +97,10 @@ share it with drivers. But that's a very long term goal, and by far not just an
issue with DC - other drivers, especially around DP sink handling, are equally
guilty.

19. The DC logger is still a rather sore thing, but I know that the DRM_DEBUG
stuff just isn't up to the challenges either. We need to figure out something
that integrates better with DRM and linux debug printing, while not being
useless with filtering output. dynamic debug printing might be an option.
19. DONE - The DC logger is still a rather sore thing, but I know that the
DRM_DEBUG stuff just isn't up to the challenges either. We need to figure out
something that integrates better with DRM and linux debug printing, while not
being useless with filtering output. dynamic debug printing might be an option.

20. Use kernel i2c device to program HDMI retimer. Some boards have an HDMI
retimer that we need to program to pass PHY compliance. Currently that's
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -60,7 +60,7 @@

#include "modules/inc/mod_freesync.h"

#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
#ifdef CONFIG_X86
#include "ivsrcid/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"

@@ -1041,7 +1041,7 @@ static void handle_hpd_rx_irq(void *param)
	if (dc_link->type != dc_connection_mst_branch)
		mutex_lock(&aconnector->hpd_lock);

	if (dc_link_handle_hpd_rx_irq(dc_link, NULL) &&
	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
	    !is_mst_root_connector) {
		/* Downstream Port status changed. */
		if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {

@@ -1192,7 +1192,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
	return 0;
}

#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
#ifdef CONFIG_X86
/* Register IRQ sources and initialize IRQ callbacks */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{

@@ -1526,7 +1526,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
		goto fail;
	}
	break;
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
#ifdef CONFIG_X86
	case CHIP_RAVEN:
		if (dcn10_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");

@@ -1725,7 +1725,7 @@ static int dm_early_init(void *handle)
		adev->mode_info.num_dig = 6;
		adev->mode_info.plane_type = dm_plane_type_default;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
#ifdef CONFIG_X86
	case CHIP_RAVEN:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 4;

@@ -3094,15 +3094,25 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
	else
		domain = AMDGPU_GEM_DOMAIN_VRAM;

	r = amdgpu_bo_pin(rbo, domain, &afb->address);
	amdgpu_bo_unreserve(rbo);

	r = amdgpu_bo_pin(rbo, domain);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
		amdgpu_bo_unreserve(rbo);
		return r;
	}

	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
	if (unlikely(r != 0)) {
		amdgpu_bo_unpin(rbo);
		amdgpu_bo_unreserve(rbo);
		DRM_ERROR("%p bind failed\n", rbo);
		return r;
	}
	amdgpu_bo_unreserve(rbo);

	afb->address = amdgpu_bo_gpu_offset(rbo);

	amdgpu_bo_ref(rbo);

	if (dm_plane_state_new->dc_state &&

@@ -3499,7 +3509,6 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
	aconnector->base.stereo_allowed = false;
	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */

	mutex_init(&aconnector->hpd_lock);

	/* configure support HPD hot plug connector->polled default value is 0

@@ -3508,9 +3517,13 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
	switch (connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DisplayPort:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DVID:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -26,113 +26,667 @@
#include <linux/debugfs.h>

#include "dc.h"
#include "dc_link.h"

#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_debugfs.h"

static ssize_t dp_link_rate_debugfs_read(struct file *f, char __user *buf,
/* function description
 * get/set DP configuration: lane_count, link_rate, spread_spectrum
 *
 * valid lane count values: 1, 2, 4
 * valid link rate values:
 * 06h = 1.62Gbps per lane
 * 0Ah = 2.7Gbps per lane
 * 0Ch = 3.24Gbps per lane
 * 14h = 5.4Gbps per lane
 * 1Eh = 8.1Gbps per lane
 *
 * debugfs is located at /sys/kernel/debug/dri/0/DP-x/link_settings
 *
 * --- to get dp configuration
 *
 * cat link_settings
 *
 * It will list current, verified, reported, preferred dp configuration.
 * current -- for current video mode
 * verified --- maximum configuration which passed link training
 * reported --- DP rx reported caps (DPCD register offset 0, 1, 2)
 * preferred --- user forced settings
 *
 * --- set (or force) dp configuration
 *
 * echo <lane_count> <link_rate> > link_settings
 *
 * for example, to force 4 lanes at 2.7 Gbps,
 * echo 4 0xa > link_settings
 *
 * spread_spectrum could not be changed dynamically.
 *
 * in case an invalid lane count or link rate is forced, no HW programming
 * will be done. please check link settings after the force operation to see
 * if HW got programmed.
 *
 * cat link_settings
 *
 * check current and preferred settings.
 *
 */
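The interface documented in the comment above is plain file I/O, so it can be driven from a small userspace program as well as from the shell. A minimal sketch (the connector name "DP-1" is an example; pick the right DP-x directory for your display, and run as root):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		const char *path = "/sys/kernel/debug/dri/0/DP-1/link_settings";
		char buf[129];
		ssize_t n;
		int fd = open(path, O_RDWR);

		if (fd < 0) {
			perror("open");
			return 1;
		}

		/* force 4 lanes at link rate 0xa (2.7 Gbps), per the comment above */
		if (write(fd, "4 0xa", 5) < 0)
			perror("write");

		/* read back; the handler below rejects sizes/offsets that are
		 * not 4-byte aligned, so request a multiple of 4 bytes */
		n = pread(fd, buf, 128, 0);
		if (n > 0) {
			buf[n] = '\0';
			printf("%s\n", buf);
		}

		close(fd);
		return 0;
	}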
static ssize_t dp_link_settings_read(struct file *f, char __user *buf,
				 size_t size, loff_t *pos)
{
	/* TODO: create method to read link rate */
	return 1;
	struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
	struct dc_link *link = connector->dc_link;
	char *rd_buf = NULL;
	char *rd_buf_ptr = NULL;
	const uint32_t rd_buf_size = 100;
	uint32_t result = 0;
	uint8_t str_len = 0;
	int r;

	if (*pos & 3 || size & 3)
		return -EINVAL;

	rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
	if (!rd_buf)
		return 0;

	rd_buf_ptr = rd_buf;

	str_len = strlen("Current: %d %d %d ");
	snprintf(rd_buf_ptr, str_len, "Current: %d %d %d ",
			link->cur_link_settings.lane_count,
			link->cur_link_settings.link_rate,
			link->cur_link_settings.link_spread);
	rd_buf_ptr += str_len;

	str_len = strlen("Verified: %d %d %d ");
	snprintf(rd_buf_ptr, str_len, "Verified: %d %d %d ",
			link->verified_link_cap.lane_count,
			link->verified_link_cap.link_rate,
			link->verified_link_cap.link_spread);
	rd_buf_ptr += str_len;

	str_len = strlen("Reported: %d %d %d ");
	snprintf(rd_buf_ptr, str_len, "Reported: %d %d %d ",
			link->reported_link_cap.lane_count,
			link->reported_link_cap.link_rate,
			link->reported_link_cap.link_spread);
	rd_buf_ptr += str_len;

	str_len = strlen("Preferred: %d %d %d ");
	snprintf(rd_buf_ptr, str_len, "Preferred: %d %d %d\n",
			link->preferred_link_setting.lane_count,
			link->preferred_link_setting.link_rate,
			link->preferred_link_setting.link_spread);

	while (size) {
		if (*pos >= rd_buf_size)
			break;

		r = put_user(*(rd_buf + result), buf);
		if (r)
			return r; /* r = -EFAULT */

		buf += 1;
		size -= 1;
		*pos += 1;
		result += 1;
	}

	kfree(rd_buf);
	return result;
}

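The read path above copies the buffer to userspace one byte at a time with put_user(), and on a fault it returns without freeing rd_buf. A common alternative in debugfs code is simple_read_from_buffer(), which does the offset and size bookkeeping in one call and makes the cleanup path trivial. A sketch of how the same read could look under that approach (kernel-style; the snprintf() preparation is assumed to be the same as above):

	/* Sketch: the same read via simple_read_from_buffer(). Hypothetical
	 * alternative for illustration, not the code this commit adds. */
	static ssize_t dp_link_settings_read_alt(struct file *f, char __user *buf,
						 size_t size, loff_t *pos)
	{
		char *rd_buf;
		ssize_t result;

		rd_buf = kcalloc(100, sizeof(char), GFP_KERNEL);
		if (!rd_buf)
			return -ENOMEM;

		/* ... fill rd_buf with snprintf() exactly as above ... */

		result = simple_read_from_buffer(buf, size, pos, rd_buf,
						 strlen(rd_buf));
		kfree(rd_buf);	/* freed on every path, including -EFAULT */
		return result;
	}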
static ssize_t dp_link_rate_debugfs_write(struct file *f, const char __user *buf,
static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
				 size_t size, loff_t *pos)
{
	/* TODO: create method to write link rate */
	return 1;
	struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
	struct dc_link *link = connector->dc_link;
	struct dc *dc = (struct dc *)link->dc;
	struct dc_link_settings prefer_link_settings;
	char *wr_buf = NULL;
	char *wr_buf_ptr = NULL;
	const uint32_t wr_buf_size = 40;
	int r;
	int bytes_from_user;
	char *sub_str;
	/* 0: lane_count; 1: link_rate */
	uint8_t param_index = 0;
	long param[2];
	const char delimiter[3] = {' ', '\n', '\0'};
	bool valid_input = false;

	if (size == 0)
		return -EINVAL;

	wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
	if (!wr_buf)
		return -EINVAL;
	wr_buf_ptr = wr_buf;

	r = copy_from_user(wr_buf_ptr, buf, wr_buf_size);

	/* r is the number of bytes not copied */
	if (r >= wr_buf_size) {
		kfree(wr_buf);
		DRM_DEBUG_DRIVER("user data not read\n");
		return -EINVAL;
	}

	bytes_from_user = wr_buf_size - r;

	while (isspace(*wr_buf_ptr))
		wr_buf_ptr++;

	while ((*wr_buf_ptr != '\0') && (param_index < 2)) {

		sub_str = strsep(&wr_buf_ptr, delimiter);

		r = kstrtol(sub_str, 16, &param[param_index]);

		if (r)
			DRM_DEBUG_DRIVER("string to int convert error code: %d\n", r);

		param_index++;
		while (isspace(*wr_buf_ptr))
			wr_buf_ptr++;
	}

	switch (param[0]) {
	case LANE_COUNT_ONE:
	case LANE_COUNT_TWO:
	case LANE_COUNT_FOUR:
		valid_input = true;
		break;
	default:
		break;
	}

	switch (param[1]) {
	case LINK_RATE_LOW:
	case LINK_RATE_HIGH:
	case LINK_RATE_RBR2:
	case LINK_RATE_HIGH2:
	case LINK_RATE_HIGH3:
		valid_input = true;
		break;
	default:
		break;
	}

	if (!valid_input) {
		kfree(wr_buf);
		DRM_DEBUG_DRIVER("Invalid Input value: no HW will be programmed\n");
		return bytes_from_user;
	}

	/* save user forced lane_count, link_rate to preferred settings
	 * spread spectrum will not be changed
	 */
	prefer_link_settings.link_spread = link->cur_link_settings.link_spread;
	prefer_link_settings.lane_count = param[0];
	prefer_link_settings.link_rate = param[1];

	dc_link_set_preferred_link_settings(dc, &prefer_link_settings, link);

	kfree(wr_buf);
	return bytes_from_user;
}

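The write handler above tokenizes the user string with strsep() and converts each token as hex with kstrtol(). The same parsing skeleton, isolated as a kernel-style sketch (a hypothetical helper for illustration; it adds NULL guards because strsep() sets the cursor to NULL when the last token has no trailing delimiter, and it fails hard on conversion errors where the handler above only logs them):

	#include <linux/ctype.h>
	#include <linux/kernel.h>
	#include <linux/string.h>

	/* Sketch: split on space/newline, parse each token as hex.
	 * wr_buf must be NUL terminated, which the kcalloc() in the
	 * handlers above guarantees. Returns the token count or -EINVAL. */
	static int parse_hex_params(char *wr_buf, long *param, int max_params)
	{
		const char delimiter[] = " \n";
		char *sub_str;
		int n = 0;

		while (isspace(*wr_buf))
			wr_buf++;

		while (wr_buf && *wr_buf != '\0' && n < max_params) {
			sub_str = strsep(&wr_buf, delimiter);
			if (kstrtol(sub_str, 16, &param[n]))
				return -EINVAL;
			n++;
			while (wr_buf && isspace(*wr_buf))
				wr_buf++;
		}

		return n;
	}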
static ssize_t dp_lane_count_debugfs_read(struct file *f, char __user *buf,
/* function: get current DP PHY settings: voltage swing, pre-emphasis,
 * post-cursor2 (defined by VESA DP specification)
 *
 * valid values
 * voltage swing: 0,1,2,3
 * pre-emphasis : 0,1,2,3
 * post cursor2 : 0,1,2,3
 *
 *
 * how to use this debugfs
 *
 * debugfs is located at /sys/kernel/debug/dri/0/DP-x
 *
 * there will be directories, like DP-1, DP-2, DP-3, etc. for DP displays
 *
 * To figure out which DP-x is the display for the DP to be checked,
 * cd DP-x
 * ls -ll
 * There should be debugfs files, like link_settings, phy_settings.
 * cat link_settings
 * from lane_count, link_rate to figure out which DP-x is for the display to
 * be worked on
 *
 * To get current DP PHY settings,
 * cat phy_settings
 *
 * To change DP PHY settings,
 * echo <voltage_swing> <pre-emphasis> <post_cursor2> > phy_settings
 * for example, to change voltage swing to 2, pre-emphasis to 3, post_cursor2 to
 * 0,
 * echo 2 3 0 > phy_settings
 *
 * To check if the change was applied, get current phy settings by
 * cat phy_settings
 *
 * In case invalid values are set by user, like
 * echo 1 4 0 > phy_settings
 *
 * HW will NOT be programmed by these settings.
 * cat phy_settings will show the previous valid settings.
 */
static ssize_t dp_phy_settings_read(struct file *f, char __user *buf,
				 size_t size, loff_t *pos)
{
	/* TODO: create method to read lane count */
	return 1;
	struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
	struct dc_link *link = connector->dc_link;
	char *rd_buf = NULL;
	const uint32_t rd_buf_size = 20;
	uint32_t result = 0;
	int r;

	if (*pos & 3 || size & 3)
		return -EINVAL;

	rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
	if (!rd_buf)
		return -EINVAL;

	snprintf(rd_buf, rd_buf_size, " %d %d %d ",
			link->cur_lane_setting.VOLTAGE_SWING,
			link->cur_lane_setting.PRE_EMPHASIS,
			link->cur_lane_setting.POST_CURSOR2);

	while (size) {
		if (*pos >= rd_buf_size)
			break;

		r = put_user((*(rd_buf + result)), buf);
		if (r)
			return r; /* r = -EFAULT */

		buf += 1;
		size -= 1;
		*pos += 1;
		result += 1;
	}

	kfree(rd_buf);
	return result;
}

static ssize_t dp_lane_count_debugfs_write(struct file *f, const char __user *buf,
static ssize_t dp_phy_settings_write(struct file *f, const char __user *buf,
				 size_t size, loff_t *pos)
{
	/* TODO: create method to write lane count */
	return 1;
}

static ssize_t dp_voltage_swing_debugfs_read(struct file *f, char __user *buf,
				 size_t size, loff_t *pos)
{
	/* TODO: create method to read voltage swing */
	return 1;
}

static ssize_t dp_voltage_swing_debugfs_write(struct file *f, const char __user *buf,
				 size_t size, loff_t *pos)
{
	/* TODO: create method to write voltage swing */
	return 1;
}

static ssize_t dp_pre_emphasis_debugfs_read(struct file *f, char __user *buf,
				 size_t size, loff_t *pos)
{
	/* TODO: create method to read pre-emphasis */
	return 1;
}

static ssize_t dp_pre_emphasis_debugfs_write(struct file *f, const char __user *buf,
				 size_t size, loff_t *pos)
{
	/* TODO: create method to write pre-emphasis */
	return 1;
}

static ssize_t dp_phy_test_pattern_debugfs_read(struct file *f, char __user *buf,
				 size_t size, loff_t *pos)
{
	/* TODO: create method to read PHY test pattern */
	return 1;
	struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
	struct dc_link *link = connector->dc_link;
	struct dc *dc = (struct dc *)link->dc;
	char *wr_buf = NULL;
	char *wr_buf_ptr = NULL;
	uint32_t wr_buf_size = 40;
	int r;
	int bytes_from_user;
	char *sub_str;
	uint8_t param_index = 0;
	long param[3];
	const char delimiter[3] = {' ', '\n', '\0'};
	bool use_prefer_link_setting;
	struct link_training_settings link_lane_settings;

	if (size == 0)
		return 0;

	wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
	if (!wr_buf)
		return 0;
	wr_buf_ptr = wr_buf;

	r = copy_from_user(wr_buf_ptr, buf, wr_buf_size);

	/* r is the number of bytes not copied */
	if (r >= wr_buf_size) {
		kfree(wr_buf);
		DRM_DEBUG_DRIVER("user data could not be read\n");
		return 0;
	}

	bytes_from_user = wr_buf_size - r;

	while (isspace(*wr_buf_ptr))
		wr_buf_ptr++;

	while ((*wr_buf_ptr != '\0') && (param_index < 3)) {

		sub_str = strsep(&wr_buf_ptr, delimiter);

		r = kstrtol(sub_str, 16, &param[param_index]);

		if (r)
			DRM_DEBUG_DRIVER("string to int convert error code: %d\n", r);

		param_index++;
		while (isspace(*wr_buf_ptr))
			wr_buf_ptr++;
	}

	if ((param[0] > VOLTAGE_SWING_MAX_LEVEL) ||
			(param[1] > PRE_EMPHASIS_MAX_LEVEL) ||
			(param[2] > POST_CURSOR2_MAX_LEVEL)) {
		kfree(wr_buf);
		DRM_DEBUG_DRIVER("Invalid Input: no HW will be programmed\n");
		return bytes_from_user;
	}

	/* get link settings: lane count, link rate */
	use_prefer_link_setting =
		((link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN) &&
		(link->test_pattern_enabled));

	memset(&link_lane_settings, 0, sizeof(link_lane_settings));

	if (use_prefer_link_setting) {
		link_lane_settings.link_settings.lane_count =
				link->preferred_link_setting.lane_count;
		link_lane_settings.link_settings.link_rate =
				link->preferred_link_setting.link_rate;
		link_lane_settings.link_settings.link_spread =
				link->preferred_link_setting.link_spread;
	} else {
		link_lane_settings.link_settings.lane_count =
				link->cur_link_settings.lane_count;
		link_lane_settings.link_settings.link_rate =
				link->cur_link_settings.link_rate;
		link_lane_settings.link_settings.link_spread =
				link->cur_link_settings.link_spread;
	}

	/* apply phy settings from user */
	for (r = 0; r < link_lane_settings.link_settings.lane_count; r++) {
		link_lane_settings.lane_settings[r].VOLTAGE_SWING =
				(enum dc_voltage_swing) (param[0]);
		link_lane_settings.lane_settings[r].PRE_EMPHASIS =
				(enum dc_pre_emphasis) (param[1]);
		link_lane_settings.lane_settings[r].POST_CURSOR2 =
				(enum dc_post_cursor2) (param[2]);
	}

	/* program ASIC registers and DPCD registers */
	dc_link_set_drive_settings(dc, &link_lane_settings, link);

	kfree(wr_buf);
	return bytes_from_user;
}

/* function description
 *
 * set PHY layer or Link layer test pattern
 * PHY test pattern is used for PHY SI check.
 * Link layer test will not affect PHY SI.
 *
 * Reset Test Pattern:
 * 0 = DP_TEST_PATTERN_VIDEO_MODE
 *
 * PHY test pattern supported:
 * 1 = DP_TEST_PATTERN_D102
 * 2 = DP_TEST_PATTERN_SYMBOL_ERROR
 * 3 = DP_TEST_PATTERN_PRBS7
 * 4 = DP_TEST_PATTERN_80BIT_CUSTOM
 * 5 = DP_TEST_PATTERN_CP2520_1
 * 6 = DP_TEST_PATTERN_CP2520_2 = DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE
 * 7 = DP_TEST_PATTERN_CP2520_3
 *
 * DP PHY Link Training Patterns
 * 8 = DP_TEST_PATTERN_TRAINING_PATTERN1
 * 9 = DP_TEST_PATTERN_TRAINING_PATTERN2
 * a = DP_TEST_PATTERN_TRAINING_PATTERN3
 * b = DP_TEST_PATTERN_TRAINING_PATTERN4
 *
 * DP Link Layer Test pattern
 * c = DP_TEST_PATTERN_COLOR_SQUARES
 * d = DP_TEST_PATTERN_COLOR_SQUARES_CEA
 * e = DP_TEST_PATTERN_VERTICAL_BARS
 * f = DP_TEST_PATTERN_HORIZONTAL_BARS
 * 10 = DP_TEST_PATTERN_COLOR_RAMP
 *
 * debugfs phy_test_pattern is located at /sys/kernel/debug/dri/0/DP-x
 *
 * --- set test pattern
 * echo <test pattern #> > test_pattern
 *
 * If test pattern # is not supported, NO HW programming will be done.
 * for DP_TEST_PATTERN_80BIT_CUSTOM, it needs an extra 10 bytes of data
 * for the user pattern. the 10 input data bytes are separated by spaces
 *
 * echo 0x4 0x11 0x22 0x33 0x44 0x55 0x66 0x77 0x88 0x99 0xaa > test_pattern
 *
 * --- reset test pattern
 * echo 0 > test_pattern
 *
 * --- HPD detection is disabled when a PHY test pattern is set
 *
 * when a PHY test pattern (pattern # within [1,7]) is set, the HPD pin of the
 * HW ASIC is disabled. The user can unplug the DP display from the DP
 * connector and plug in a scope to check the test pattern PHY SI.
 * If there is a need to unplug the scope and plug the DP display back in, do
 * the steps below:
 * echo 0 > phy_test_pattern
 * unplug scope
 * plug DP display.
 *
 * "echo 0 > phy_test_pattern" will re-enable the HPD pin again so that the
 * video sw driver can detect "unplug scope" and "plug DP display"
 */
static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __user *buf,
				 size_t size, loff_t *pos)
{
	/* TODO: create method to write PHY test pattern */
	return 1;
	struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
	struct dc_link *link = connector->dc_link;
	char *wr_buf = NULL;
	char *wr_buf_ptr = NULL;
	uint32_t wr_buf_size = 100;
	uint32_t wr_buf_count = 0;
	int r;
	int bytes_from_user;
	char *sub_str = NULL;
	uint8_t param_index = 0;
	uint8_t param_nums = 0;
	long param[11] = {0x0};
	const char delimiter[3] = {' ', '\n', '\0'};
	enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
	bool disable_hpd = false;
	bool valid_test_pattern = false;
	/* init with default 80bit custom pattern */
	uint8_t custom_pattern[10] = {
			0x1f, 0x7c, 0xf0, 0xc1, 0x07,
			0x1f, 0x7c, 0xf0, 0xc1, 0x07
			};
	struct dc_link_settings prefer_link_settings = {LANE_COUNT_UNKNOWN,
			LINK_RATE_UNKNOWN, LINK_SPREAD_DISABLED};
	struct dc_link_settings cur_link_settings = {LANE_COUNT_UNKNOWN,
			LINK_RATE_UNKNOWN, LINK_SPREAD_DISABLED};
	struct link_training_settings link_training_settings;
	int i;

	if (size == 0)
		return 0;

	wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
	if (!wr_buf)
		return 0;
	wr_buf_ptr = wr_buf;

	r = copy_from_user(wr_buf_ptr, buf, wr_buf_size);

	/* r is the number of bytes not copied */
	if (r >= wr_buf_size) {
		kfree(wr_buf);
		DRM_DEBUG_DRIVER("user data could not be read\n");
		return 0;
	}

	bytes_from_user = wr_buf_size - r;

	/* check number of parameters. isspace cannot differ space and \n */
	while ((*wr_buf_ptr != 0xa) && (wr_buf_count < wr_buf_size)) {
		/* skip space */
		while (isspace(*wr_buf_ptr) && (wr_buf_count < wr_buf_size)) {
			wr_buf_ptr++;
			wr_buf_count++;
		}

		if (wr_buf_count == wr_buf_size)
			break;

		/* skip non-space */
		while ((!isspace(*wr_buf_ptr)) && (wr_buf_count < wr_buf_size)) {
			wr_buf_ptr++;
			wr_buf_count++;
		}

		param_nums++;

		if (wr_buf_count == wr_buf_size)
			break;
	}

	/* max 11 parameters */
	if (param_nums > 11)
		param_nums = 11;

	wr_buf_ptr = wr_buf; /* reset buf pointer */
	wr_buf_count = 0; /* number of chars already checked */

	while (isspace(*wr_buf_ptr) && (wr_buf_count < wr_buf_size)) {
		wr_buf_ptr++;
		wr_buf_count++;
	}

	while (param_index < param_nums) {
		/* after strsep, wr_buf_ptr will be moved to after space */
		sub_str = strsep(&wr_buf_ptr, delimiter);

		r = kstrtol(sub_str, 16, &param[param_index]);

		if (r)
			DRM_DEBUG_DRIVER("string to int convert error code: %d\n", r);

		param_index++;
	}

	test_pattern = param[0];

	switch (test_pattern) {
	case DP_TEST_PATTERN_VIDEO_MODE:
	case DP_TEST_PATTERN_COLOR_SQUARES:
	case DP_TEST_PATTERN_COLOR_SQUARES_CEA:
	case DP_TEST_PATTERN_VERTICAL_BARS:
	case DP_TEST_PATTERN_HORIZONTAL_BARS:
	case DP_TEST_PATTERN_COLOR_RAMP:
		valid_test_pattern = true;
		break;

	case DP_TEST_PATTERN_D102:
	case DP_TEST_PATTERN_SYMBOL_ERROR:
	case DP_TEST_PATTERN_PRBS7:
	case DP_TEST_PATTERN_80BIT_CUSTOM:
	case DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE:
	case DP_TEST_PATTERN_TRAINING_PATTERN4:
		disable_hpd = true;
		valid_test_pattern = true;
		break;

	default:
		valid_test_pattern = false;
		test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
		break;
	}

	if (!valid_test_pattern) {
		kfree(wr_buf);
		DRM_DEBUG_DRIVER("Invalid Test Pattern Parameters\n");
		return bytes_from_user;
	}

	if (test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM) {
		for (i = 0; i < 10; i++) {
			if ((uint8_t) param[i + 1] != 0x0)
				break;
		}

		if (i < 10) {
			/* not using the default value */
			for (i = 0; i < 10; i++)
				custom_pattern[i] = (uint8_t) param[i + 1];
		}
	}

	/* Usage: set DP physical test pattern using debugfs with normal DP
	 * panel. Then plug out DP panel and connect a scope to measure.
	 * For normal video mode and test patterns generated from the CRTC,
	 * they are visible to the user. So do not disable HPD.
	 * Video Mode is also set to clear the test pattern, so enable HPD
	 * because it might have been disabled after a test pattern was set.
	 * AUX depends on HPD * sequence dependent, do not move!
	 */
	if (!disable_hpd)
		dc_link_enable_hpd(link);

	prefer_link_settings.lane_count = link->verified_link_cap.lane_count;
	prefer_link_settings.link_rate = link->verified_link_cap.link_rate;
	prefer_link_settings.link_spread = link->verified_link_cap.link_spread;

	cur_link_settings.lane_count = link->cur_link_settings.lane_count;
	cur_link_settings.link_rate = link->cur_link_settings.link_rate;
	cur_link_settings.link_spread = link->cur_link_settings.link_spread;

	link_training_settings.link_settings = cur_link_settings;


	if (test_pattern != DP_TEST_PATTERN_VIDEO_MODE) {
		if (prefer_link_settings.lane_count != LANE_COUNT_UNKNOWN &&
			prefer_link_settings.link_rate != LINK_RATE_UNKNOWN &&
			(prefer_link_settings.lane_count != cur_link_settings.lane_count ||
			prefer_link_settings.link_rate != cur_link_settings.link_rate))
			link_training_settings.link_settings = prefer_link_settings;
	}

	for (i = 0; i < (unsigned int)(link_training_settings.link_settings.lane_count); i++)
		link_training_settings.lane_settings[i] = link->cur_lane_setting;

	dc_link_set_test_pattern(
		link,
		test_pattern,
		&link_training_settings,
		custom_pattern,
		10);

	/* Usage: Set DP physical test pattern using AMDDP with normal DP panel
	 * Then plug out DP panel and connect a scope to measure DP PHY signal.
	 * Need to disable interrupt to avoid SW driver disabling DP output.
	 * This is done after the test pattern is set.
	 */
	if (valid_test_pattern && disable_hpd)
		dc_link_disable_hpd(link);

	kfree(wr_buf);

	return bytes_from_user;
}

static const struct file_operations dp_link_rate_fops = {
static const struct file_operations dp_link_settings_debugfs_fops = {
	.owner = THIS_MODULE,
	.read = dp_link_rate_debugfs_read,
	.write = dp_link_rate_debugfs_write,
	.read = dp_link_settings_read,
	.write = dp_link_settings_write,
	.llseek = default_llseek
};

static const struct file_operations dp_lane_count_fops = {
static const struct file_operations dp_phy_settings_debugfs_fop = {
	.owner = THIS_MODULE,
	.read = dp_lane_count_debugfs_read,
	.write = dp_lane_count_debugfs_write,
	.llseek = default_llseek
};

static const struct file_operations dp_voltage_swing_fops = {
	.owner = THIS_MODULE,
	.read = dp_voltage_swing_debugfs_read,
	.write = dp_voltage_swing_debugfs_write,
	.llseek = default_llseek
};

static const struct file_operations dp_pre_emphasis_fops = {
	.owner = THIS_MODULE,
	.read = dp_pre_emphasis_debugfs_read,
	.write = dp_pre_emphasis_debugfs_write,
	.read = dp_phy_settings_read,
	.write = dp_phy_settings_write,
	.llseek = default_llseek
};

static const struct file_operations dp_phy_test_pattern_fops = {
	.owner = THIS_MODULE,
	.read = dp_phy_test_pattern_debugfs_read,
	.write = dp_phy_test_pattern_debugfs_write,
	.llseek = default_llseek
};

@@ -141,11 +695,9 @@ static const struct {
	char *name;
	const struct file_operations *fops;
} dp_debugfs_entries[] = {
	{"link_rate", &dp_link_rate_fops},
	{"lane_count", &dp_lane_count_fops},
	{"voltage_swing", &dp_voltage_swing_fops},
	{"pre_emphasis", &dp_pre_emphasis_fops},
	{"phy_test_pattern", &dp_phy_test_pattern_fops}
	{"link_settings", &dp_link_settings_debugfs_fops},
	{"phy_settings", &dp_phy_settings_debugfs_fop},
	{"test_pattern", &dp_phy_test_pattern_fops}
};

int connector_debugfs_init(struct amdgpu_dm_connector *connector)
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -169,6 +169,11 @@ static void get_payload_table(
	mutex_unlock(&mst_mgr->payload_lock);
}

void dm_helpers_dp_update_branch_info(
		struct dc_context *ctx,
		const struct dc_link *link)
{}

/*
 * Writes payload allocation table in immediate downstream device.
 */

@@ -454,6 +459,22 @@ bool dm_helpers_submit_i2c(
	return result;
}

bool dm_helpers_is_dp_sink_present(struct dc_link *link)
{
	bool dp_sink_present;
	struct amdgpu_dm_connector *aconnector = link->priv;

	if (!aconnector) {
		BUG_ON("Failed to find connector for link!");
		return true;
	}

	mutex_lock(&aconnector->dm_dp_aux.aux.hw_mutex);
	dp_sink_present = dc_link_is_dp_sink_present(link);
	mutex_unlock(&aconnector->dm_dp_aux.aux.hw_mutex);
	return dp_sink_present;
}

enum dc_edid_status dm_helpers_read_local_edid(
		struct dc_context *ctx,
		struct dc_link *link,

@@ -498,8 +519,8 @@ enum dc_edid_status dm_helpers_read_local_edid(
				edid_status,
				aconnector->base.name);
	if (link->aux_mode) {
		union test_request test_request = {0};
		union test_response test_response = {0};
		union test_request test_request = { {0} };
		union test_response test_response = { {0} };

		dm_helpers_dp_read_dpcd(ctx,
			link,
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -80,55 +80,72 @@ static void log_dpcd(uint8_t type,
static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
				  struct drm_dp_aux_msg *msg)
{
	enum i2c_mot_mode mot = (msg->request & DP_AUX_I2C_MOT) ?
		I2C_MOT_TRUE : I2C_MOT_FALSE;
	enum ddc_result res;
	uint32_t read_bytes = msg->size;
	ssize_t result = 0;
	enum i2caux_transaction_action action;
	enum aux_transaction_type type;

	if (WARN_ON(msg->size > 16))
		return -E2BIG;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_READ:
		res = dal_ddc_service_read_dpcd_data(
				TO_DM_AUX(aux)->ddc_service,
				false,
				I2C_MOT_UNDEF,
				msg->address,
				msg->buffer,
				msg->size,
				&read_bytes);
		type = AUX_TRANSACTION_TYPE_DP;
		action = I2CAUX_TRANSACTION_ACTION_DP_READ;

		result = dc_link_aux_transfer(TO_DM_AUX(aux)->ddc_service,
					      msg->address,
					      &msg->reply,
					      msg->buffer,
					      msg->size,
					      type,
					      action);
		break;
	case DP_AUX_NATIVE_WRITE:
		res = dal_ddc_service_write_dpcd_data(
				TO_DM_AUX(aux)->ddc_service,
				false,
				I2C_MOT_UNDEF,
				msg->address,
				msg->buffer,
				msg->size);
		type = AUX_TRANSACTION_TYPE_DP;
		action = I2CAUX_TRANSACTION_ACTION_DP_WRITE;

		dc_link_aux_transfer(TO_DM_AUX(aux)->ddc_service,
				     msg->address,
				     &msg->reply,
				     msg->buffer,
				     msg->size,
				     type,
				     action);
		result = msg->size;
		break;
	case DP_AUX_I2C_READ:
		res = dal_ddc_service_read_dpcd_data(
				TO_DM_AUX(aux)->ddc_service,
				true,
				mot,
				msg->address,
				msg->buffer,
				msg->size,
				&read_bytes);
		type = AUX_TRANSACTION_TYPE_I2C;
		if (msg->request & DP_AUX_I2C_MOT)
			action = I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT;
		else
			action = I2CAUX_TRANSACTION_ACTION_I2C_READ;

		result = dc_link_aux_transfer(TO_DM_AUX(aux)->ddc_service,
					      msg->address,
					      &msg->reply,
					      msg->buffer,
					      msg->size,
					      type,
					      action);
		break;
	case DP_AUX_I2C_WRITE:
		res = dal_ddc_service_write_dpcd_data(
				TO_DM_AUX(aux)->ddc_service,
				true,
				mot,
				msg->address,
				msg->buffer,
				msg->size);
		type = AUX_TRANSACTION_TYPE_I2C;
		if (msg->request & DP_AUX_I2C_MOT)
			action = I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT;
		else
			action = I2CAUX_TRANSACTION_ACTION_I2C_WRITE;

		dc_link_aux_transfer(TO_DM_AUX(aux)->ddc_service,
				     msg->address,
				     &msg->reply,
				     msg->buffer,
				     msg->size,
				     type,
				     action);
		result = msg->size;
		break;
	default:
		return 0;
		return -EINVAL;
	}

#ifdef TRACE_DPCD

@@ -139,9 +156,10 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
			r == DDC_RESULT_SUCESSFULL);
#endif

	if (res != DDC_RESULT_SUCESSFULL)
		return -EIO;
	return read_bytes;
	if (result < 0) /* DC doesn't know about kernel error codes */
		result = -EIO;

	return result;
}

static enum drm_connector_status
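The rework above collapses four distinct dal_ddc_service_* calls into one dc_link_aux_transfer() entry point; the only per-request work left is deciding the (type, action) pair. That mapping, isolated as a sketch built from the enum names in the hunk above (a hypothetical helper for illustration, not code this commit adds):

	/* Sketch: DRM AUX request -> DC (type, action) mapping implemented
	 * by the switch in dm_dp_aux_transfer() above. */
	static int map_aux_request(unsigned int request,
				   enum aux_transaction_type *type,
				   enum i2caux_transaction_action *action)
	{
		bool mot = request & DP_AUX_I2C_MOT;

		switch (request & ~DP_AUX_I2C_MOT) {
		case DP_AUX_NATIVE_READ:
			*type = AUX_TRANSACTION_TYPE_DP;
			*action = I2CAUX_TRANSACTION_ACTION_DP_READ;
			return 0;
		case DP_AUX_NATIVE_WRITE:
			*type = AUX_TRANSACTION_TYPE_DP;
			*action = I2CAUX_TRANSACTION_ACTION_DP_WRITE;
			return 0;
		case DP_AUX_I2C_READ:
			*type = AUX_TRANSACTION_TYPE_I2C;
			*action = mot ? I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT
				      : I2CAUX_TRANSACTION_ACTION_I2C_READ;
			return 0;
		case DP_AUX_I2C_WRITE:
			*type = AUX_TRANSACTION_TYPE_I2C;
			*action = mot ? I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT
				      : I2CAUX_TRANSACTION_ACTION_I2C_WRITE;
			return 0;
		default:
			return -EINVAL;	/* matches the new default: branch */
		}
	}

Note also the error-path change at the end of the function: failures now surface as -EINVAL/-EIO instead of a silent 0, since DC result codes are not kernel error codes.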
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
@@ -192,6 +192,33 @@ static enum amd_pp_clock_type dc_to_pp_clock_type(
	return amd_pp_clk_type;
}

static enum dm_pp_clocks_state pp_to_dc_powerlevel_state(
			enum PP_DAL_POWERLEVEL max_clocks_state)
{
	switch (max_clocks_state) {
	case PP_DAL_POWERLEVEL_0:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_0;
	case PP_DAL_POWERLEVEL_1:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_1;
	case PP_DAL_POWERLEVEL_2:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_2;
	case PP_DAL_POWERLEVEL_3:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_3;
	case PP_DAL_POWERLEVEL_4:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_4;
	case PP_DAL_POWERLEVEL_5:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_5;
	case PP_DAL_POWERLEVEL_6:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_6;
	case PP_DAL_POWERLEVEL_7:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_7;
	default:
		DRM_ERROR("DM_PPLIB: invalid powerlevel state: %d!\n",
				max_clocks_state);
		return DM_PP_CLOCKS_STATE_INVALID;
	}
}

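pp_to_dc_powerlevel_state() is a pure one-to-one enum translation. If the PP_DAL_POWERLEVEL_0..7 and DM_PP_CLOCKS_DPM_STATE_LEVEL_0..7 values are consecutive, which the switch above suggests but this diff does not prove, the same mapping could be a bounds-checked table (a sketch under that assumption, not a suggested change):

	/* Sketch: the same mapping as a lookup table. Assumes both enums
	 * are contiguous; the explicit switch above does not need that
	 * assumption, which is presumably why it was written that way. */
	static enum dm_pp_clocks_state
	pp_to_dc_powerlevel_state_tbl(enum PP_DAL_POWERLEVEL level)
	{
		static const enum dm_pp_clocks_state tbl[] = {
			[PP_DAL_POWERLEVEL_0] = DM_PP_CLOCKS_DPM_STATE_LEVEL_0,
			[PP_DAL_POWERLEVEL_1] = DM_PP_CLOCKS_DPM_STATE_LEVEL_1,
			[PP_DAL_POWERLEVEL_2] = DM_PP_CLOCKS_DPM_STATE_LEVEL_2,
			[PP_DAL_POWERLEVEL_3] = DM_PP_CLOCKS_DPM_STATE_LEVEL_3,
			[PP_DAL_POWERLEVEL_4] = DM_PP_CLOCKS_DPM_STATE_LEVEL_4,
			[PP_DAL_POWERLEVEL_5] = DM_PP_CLOCKS_DPM_STATE_LEVEL_5,
			[PP_DAL_POWERLEVEL_6] = DM_PP_CLOCKS_DPM_STATE_LEVEL_6,
			[PP_DAL_POWERLEVEL_7] = DM_PP_CLOCKS_DPM_STATE_LEVEL_7,
		};

		if (level < PP_DAL_POWERLEVEL_0 || level > PP_DAL_POWERLEVEL_7)
			return DM_PP_CLOCKS_STATE_INVALID;
		return tbl[level];
	}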
static void pp_to_dc_clock_levels(
		const struct amd_pp_clocks *pp_clks,
		struct dm_pp_clock_levels *dc_clks,

@@ -441,7 +468,7 @@ bool dm_pp_get_static_clocks(
	if (ret)
		return false;

	static_clk_info->max_clocks_state = pp_clk_info.max_clocks_state;
	static_clk_info->max_clocks_state = pp_to_dc_powerlevel_state(pp_clk_info.max_clocks_state);
	static_clk_info->max_mclk_khz = pp_clk_info.max_memory_clock;
	static_clk_info->max_sclk_khz = pp_clk_info.max_engine_clock;
drivers/gpu/drm/amd/display/dc/Makefile
@@ -25,7 +25,7 @@

DC_LIBS = basics bios calcs dce gpio i2caux irq virtual

ifdef CONFIG_DRM_AMD_DC_DCN1_0
ifdef CONFIG_X86
DC_LIBS += dcn10 dml
endif
drivers/gpu/drm/amd/display/dc/basics/Makefile
@@ -25,7 +25,7 @@
# subcomponents.

BASICS = conversion.o fixpt31_32.o \
	logger.o log_helpers.o vector.o
	log_helpers.o vector.o

AMD_DAL_BASICS = $(addprefix $(AMDDALPATH)/dc/basics/,$(BASICS))
drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
@@ -28,77 +28,12 @@
#include "include/logger_interface.h"
#include "dm_helpers.h"

#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))

struct dc_signal_type_info {
	enum signal_type type;
	char name[MAX_NAME_LEN];
};

static const struct dc_signal_type_info signal_type_info_tbl[] = {
	{SIGNAL_TYPE_NONE, "NC"},
	{SIGNAL_TYPE_DVI_SINGLE_LINK, "DVI"},
	{SIGNAL_TYPE_DVI_DUAL_LINK, "DDVI"},
	{SIGNAL_TYPE_HDMI_TYPE_A, "HDMIA"},
	{SIGNAL_TYPE_LVDS, "LVDS"},
	{SIGNAL_TYPE_RGB, "VGA"},
	{SIGNAL_TYPE_DISPLAY_PORT, "DP"},
	{SIGNAL_TYPE_DISPLAY_PORT_MST, "MST"},
	{SIGNAL_TYPE_EDP, "eDP"},
	{SIGNAL_TYPE_VIRTUAL, "Virtual"}
};

void dc_conn_log(struct dc_context *ctx,
		const struct dc_link *link,
		uint8_t *hex_data,
		int hex_data_count,
		enum dc_log_type event,
		const char *msg,
		...)
void dc_conn_log_hex_linux(const uint8_t *hex_data, int hex_data_count)
{
	int i;
	va_list args;
	struct log_entry entry = { 0 };
	enum signal_type signal;

	if (link->local_sink)
		signal = link->local_sink->sink_signal;
	else
		signal = link->connector_signal;

	if (link->type == dc_connection_mst_branch)
		signal = SIGNAL_TYPE_DISPLAY_PORT_MST;

	dm_logger_open(ctx->logger, &entry, event);

	for (i = 0; i < NUM_ELEMENTS(signal_type_info_tbl); i++)
		if (signal == signal_type_info_tbl[i].type)
			break;

	if (i == NUM_ELEMENTS(signal_type_info_tbl))
		goto fail;

	dm_logger_append_heading(&entry);

	dm_logger_append(&entry, "[%s][ConnIdx:%d] ",
			signal_type_info_tbl[i].name,
			link->link_index);

	va_start(args, msg);
	dm_logger_append_va(&entry, msg, args);

	if (entry.buf_offset > 0 &&
			entry.buf[entry.buf_offset - 1] == '\n')
		entry.buf_offset--;

	if (hex_data)
		for (i = 0; i < hex_data_count; i++)
			dm_logger_append(&entry, "%2.2X ", hex_data[i]);

	dm_logger_append(&entry, "^\n");

fail:
	dm_logger_close(&entry);

	va_end(args);
	for (i = 0; i < hex_data_count; i++)
		DC_LOG_DEBUG("%2.2X ", hex_data[i]);
}
@ -1,406 +0,0 @@
|
|||
/*
|
||||
* Copyright 2012-15 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: AMD
|
||||
*
|
||||
*/
|
||||
#include "dm_services.h"
|
||||
#include "include/logger_interface.h"
|
||||
#include "logger.h"
|
||||
|
||||
|
||||
#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
|
||||
|
||||
static const struct dc_log_type_info log_type_info_tbl[] = {
|
||||
{LOG_ERROR, "Error"},
|
||||
{LOG_WARNING, "Warning"},
|
||||
{LOG_DEBUG, "Debug"},
|
||||
{LOG_DC, "DC_Interface"},
|
||||
{LOG_DTN, "DTN"},
|
||||
{LOG_SURFACE, "Surface"},
|
||||
{LOG_HW_HOTPLUG, "HW_Hotplug"},
|
||||
{LOG_HW_LINK_TRAINING, "HW_LKTN"},
|
||||
{LOG_HW_SET_MODE, "HW_Mode"},
|
||||
{LOG_HW_RESUME_S3, "HW_Resume"},
|
||||
{LOG_HW_AUDIO, "HW_Audio"},
|
||||
{LOG_HW_HPD_IRQ, "HW_HPDIRQ"},
|
||||
{LOG_MST, "MST"},
|
||||
{LOG_SCALER, "Scaler"},
|
||||
{LOG_BIOS, "BIOS"},
|
||||
{LOG_BANDWIDTH_CALCS, "BWCalcs"},
|
||||
{LOG_BANDWIDTH_VALIDATION, "BWValidation"},
|
||||
{LOG_I2C_AUX, "I2C_AUX"},
|
||||
{LOG_SYNC, "Sync"},
|
||||
{LOG_BACKLIGHT, "Backlight"},
|
||||
{LOG_FEATURE_OVERRIDE, "Override"},
|
||||
{LOG_DETECTION_EDID_PARSER, "Edid"},
|
||||
{LOG_DETECTION_DP_CAPS, "DP_Caps"},
|
||||
{LOG_RESOURCE, "Resource"},
|
||||
{LOG_DML, "DML"},
|
||||
{LOG_EVENT_MODE_SET, "Mode"},
|
||||
{LOG_EVENT_DETECTION, "Detect"},
|
||||
{LOG_EVENT_LINK_TRAINING, "LKTN"},
|
||||
{LOG_EVENT_LINK_LOSS, "LinkLoss"},
|
||||
{LOG_EVENT_UNDERFLOW, "Underflow"},
|
||||
{LOG_IF_TRACE, "InterfaceTrace"},
|
||||
{LOG_PERF_TRACE, "PerfTrace"},
|
||||
{LOG_DISPLAYSTATS, "DisplayStats"}
|
||||
};
|
||||
|
||||
|
||||
/* ----------- Object init and destruction ----------- */
|
||||
static bool construct(struct dc_context *ctx, struct dal_logger *logger,
|
||||
uint32_t log_mask)
|
||||
{
|
||||
/* malloc buffer and init offsets */
|
||||
logger->log_buffer_size = DAL_LOGGER_BUFFER_MAX_SIZE;
|
||||
logger->log_buffer = kcalloc(logger->log_buffer_size, sizeof(char),
|
||||
GFP_KERNEL);
|
||||
if (!logger->log_buffer)
|
||||
return false;
|
||||
|
||||
/* Initialize both offsets to start of buffer (empty) */
|
||||
logger->buffer_read_offset = 0;
|
||||
logger->buffer_write_offset = 0;
|
||||
|
||||
logger->open_count = 0;
|
||||
|
||||
logger->flags.bits.ENABLE_CONSOLE = 1;
|
||||
logger->flags.bits.ENABLE_BUFFER = 0;
|
||||
|
||||
logger->ctx = ctx;
|
||||
|
||||
logger->mask = log_mask;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static void destruct(struct dal_logger *logger)
|
||||
{
|
||||
if (logger->log_buffer) {
|
||||
kfree(logger->log_buffer);
|
||||
logger->log_buffer = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
struct dal_logger *dal_logger_create(struct dc_context *ctx, uint32_t log_mask)
|
||||
{
|
||||
/* malloc struct */
|
||||
struct dal_logger *logger = kzalloc(sizeof(struct dal_logger),
|
||||
GFP_KERNEL);
|
||||
|
||||
if (!logger)
|
||||
return NULL;
|
||||
if (!construct(ctx, logger, log_mask)) {
|
||||
kfree(logger);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return logger;
|
||||
}
|
||||
|
||||
uint32_t dal_logger_destroy(struct dal_logger **logger)
|
||||
{
|
||||
if (logger == NULL || *logger == NULL)
|
||||
return 1;
|
||||
destruct(*logger);
|
||||
kfree(*logger);
|
||||
*logger = NULL;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------------------ */
|
||||
void dm_logger_append_heading(struct log_entry *entry)
|
||||
{
|
||||
int j;
|
||||
|
||||
for (j = 0; j < NUM_ELEMENTS(log_type_info_tbl); j++) {
|
||||
|
||||
const struct dc_log_type_info *info = &log_type_info_tbl[j];
|
||||
|
||||
if (info->type == entry->type)
|
||||
dm_logger_append(entry, "[%s]\t", info->name);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/* Print everything unread existing in log_buffer to debug console*/
|
||||
void dm_logger_flush_buffer(struct dal_logger *logger, bool should_warn)
|
||||
{
|
||||
char *string_start = &logger->log_buffer[logger->buffer_read_offset];
|
||||
|
||||
if (should_warn)
|
||||
dm_output_to_console(
|
||||
"---------------- FLUSHING LOG BUFFER ----------------\n");
|
||||
while (logger->buffer_read_offset < logger->buffer_write_offset) {
|
||||
|
||||
if (logger->log_buffer[logger->buffer_read_offset] == '\0') {
|
||||
dm_output_to_console("%s", string_start);
|
||||
string_start = logger->log_buffer + logger->buffer_read_offset + 1;
|
||||
}
|
||||
logger->buffer_read_offset++;
|
||||
}
|
||||
if (should_warn)
|
||||
dm_output_to_console(
|
||||
"-------------- END FLUSHING LOG BUFFER --------------\n\n");
|
||||
}
|
||||
/* ------------------------------------------------------------------------ */
|
||||
|
||||
/* Warning: Be careful that 'msg' is null terminated and the total size is
|
||||
* less than DAL_LOGGER_BUFFER_MAX_LOG_LINE_SIZE (256) including '\0'
|
||||
*/
|
||||
static bool dal_logger_should_log(
|
||||
struct dal_logger *logger,
|
||||
enum dc_log_type log_type)
|
||||
{
|
||||
if (logger->mask & (1 << log_type))
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static void log_to_debug_console(struct log_entry *entry)
|
||||
{
|
||||
struct dal_logger *logger = entry->logger;
|
||||
|
||||
if (logger->flags.bits.ENABLE_CONSOLE == 0)
|
||||
return;
|
||||
|
||||
if (entry->buf_offset) {
|
||||
switch (entry->type) {
|
||||
case LOG_ERROR:
|
||||
dm_error("%s", entry->buf);
|
||||
break;
|
||||
default:
|
||||
dm_output_to_console("%s", entry->buf);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
static void log_to_internal_buffer(struct log_entry *entry)
|
||||
{
|
||||
|
||||
uint32_t size = entry->buf_offset;
|
||||
struct dal_logger *logger = entry->logger;
|
||||
|
||||
if (logger->flags.bits.ENABLE_BUFFER == 0)
|
||||
return;
|
||||
|
||||
if (logger->log_buffer == NULL)
|
||||
return;
|
||||
|
||||
if (size > 0 && size < logger->log_buffer_size) {
|
||||
|
||||
int buffer_space = logger->log_buffer_size -
|
||||
logger->buffer_write_offset;
|
||||
|
||||
if (logger->buffer_write_offset == logger->buffer_read_offset) {
|
||||
/* Buffer is empty, start writing at beginning */
|
||||
buffer_space = logger->log_buffer_size;
|
||||
logger->buffer_write_offset = 0;
|
||||
logger->buffer_read_offset = 0;
|
||||
}
|
||||
|
||||
if (buffer_space > size) {
|
||||
/* No wrap around, copy 'size' bytes
|
||||
* from 'entry->buf' to 'log_buffer'
|
||||
*/
|
||||
memmove(logger->log_buffer +
|
||||
				logger->buffer_write_offset,
				entry->buf, size);
			logger->buffer_write_offset += size;

		} else {
			/* Not enough room remaining, we should flush
			 * existing logs */

			/* Flush existing unread logs to console */
			dm_logger_flush_buffer(logger, true);

			/* Start writing to beginning of buffer */
			memmove(logger->log_buffer, entry->buf, size);
			logger->buffer_write_offset = size;
			logger->buffer_read_offset = 0;
		}

	}
}

static void append_entry(
		struct log_entry *entry,
		char *buffer,
		uint32_t buf_size)
{
	if (!entry->buf ||
		entry->buf_offset + buf_size > entry->max_buf_bytes
	) {
		BREAK_TO_DEBUGGER();
		return;
	}

	/* Todo: check if off by 1 byte due to \0 anywhere */
	memmove(entry->buf + entry->buf_offset, buffer, buf_size);
	entry->buf_offset += buf_size;
}


void dm_logger_write(
	struct dal_logger *logger,
	enum dc_log_type log_type,
	const char *msg,
	...)
{
	if (logger && dal_logger_should_log(logger, log_type)) {
		uint32_t size;
		va_list args;
		char buffer[LOG_MAX_LINE_SIZE];
		struct log_entry entry;

		va_start(args, msg);

		entry.logger = logger;

		entry.buf = buffer;

		entry.buf_offset = 0;
		entry.max_buf_bytes = DAL_LOGGER_BUFFER_MAX_SIZE * sizeof(char);

		entry.type = log_type;

		dm_logger_append_heading(&entry);

		size = dm_log_to_buffer(
			buffer, LOG_MAX_LINE_SIZE - 1, msg, args);

		buffer[entry.buf_offset + size] = '\0';
		entry.buf_offset += size + 1;

		/* --Flush log_entry buffer-- */
		/* print to kernel console */
		log_to_debug_console(&entry);
		/* log internally for dsat */
		log_to_internal_buffer(&entry);

		va_end(args);
	}
}

/* Same as dm_logger_write, except without open() and close(), which must
 * be done separately.
 */
void dm_logger_append(
	struct log_entry *entry,
	const char *msg,
	...)
{
	va_list args;

	va_start(args, msg);
	dm_logger_append_va(entry, msg, args);
	va_end(args);
}

void dm_logger_append_va(
	struct log_entry *entry,
	const char *msg,
	va_list args)
{
	struct dal_logger *logger;

	if (!entry) {
		BREAK_TO_DEBUGGER();
		return;
	}

	logger = entry->logger;

	if (logger && logger->open_count > 0 &&
		dal_logger_should_log(logger, entry->type)) {

		uint32_t size;
		char buffer[LOG_MAX_LINE_SIZE];

		size = dm_log_to_buffer(
			buffer, LOG_MAX_LINE_SIZE, msg, args);

		if (size < LOG_MAX_LINE_SIZE - 1) {
			append_entry(entry, buffer, size);
		} else {
			append_entry(entry, "LOG_ERROR, line too long\n", 27);
		}
	}
}

void dm_logger_open(
	struct dal_logger *logger,
	struct log_entry *entry, /* out */
	enum dc_log_type log_type)
{
	if (!entry) {
		BREAK_TO_DEBUGGER();
		return;
	}

	entry->type = log_type;
	entry->logger = logger;

	entry->buf = kzalloc(DAL_LOGGER_BUFFER_MAX_SIZE,
			     GFP_KERNEL);

	entry->buf_offset = 0;
	entry->max_buf_bytes = DAL_LOGGER_BUFFER_MAX_SIZE * sizeof(char);

	logger->open_count++;

	dm_logger_append_heading(entry);
}

void dm_logger_close(struct log_entry *entry)
{
	struct dal_logger *logger = entry->logger;

	if (logger && logger->open_count > 0) {
		logger->open_count--;
	} else {
		BREAK_TO_DEBUGGER();
		goto cleanup;
	}

	/* --Flush log_entry buffer-- */
	/* print to kernel console */
	log_to_debug_console(entry);
	/* log internally for dsat */
	log_to_internal_buffer(entry);

	/* TODO: Write end heading */

cleanup:
	if (entry->buf) {
		kfree(entry->buf);
		entry->buf = NULL;
		entry->buf_offset = 0;
		entry->max_buf_bytes = 0;
	}
}
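For context, a minimal usage sketch of the logger entry points above (illustrative only, not part of the commit; the log type and messages are placeholders):

	/* Multi-part message via open/append/close; one-shot via dm_logger_write(). */
	struct log_entry entry;

	dm_logger_open(logger, &entry, LOG_DC);	/* kzallocs entry->buf */
	dm_logger_append(&entry, "stream %d: ", 0);
	dm_logger_append(&entry, "enabled\n");
	dm_logger_close(&entry);		/* flushes, then kfrees entry->buf */

	dm_logger_write(logger, LOG_DC, "one-shot: %d\n", 42);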
@@ -678,7 +678,7 @@ static enum bp_result bios_parser_get_gpio_pin_info(
		return BP_RESULT_BADBIOSTABLE;

	if (sizeof(struct atom_common_table_header) +
-			sizeof(struct atom_gpio_pin_lut_v2_1)
+			sizeof(struct atom_gpio_pin_assignment)
			> le16_to_cpu(header->table_header.structuresize))
		return BP_RESULT_BADBIOSTABLE;

@@ -55,7 +55,7 @@ bool dal_bios_parser_init_cmd_tbl_helper2(
	case DCE_VERSION_11_22:
		*h = dal_cmd_tbl_helper_dce112_get_table2();
		return true;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
	case DCN_VERSION_1_0:
		*h = dal_cmd_tbl_helper_dce112_get_table2();
		return true;

@@ -38,7 +38,7 @@ CFLAGS_dcn_calc_math.o := $(calcs_ccflags) -Wno-tautological-compare

BW_CALCS = dce_calcs.o bw_fixed.o custom_float.o

-ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ifdef CONFIG_X86
BW_CALCS += dcn_calcs.o dcn_calc_math.o dcn_calc_auto.o
endif

@@ -25,10 +25,9 @@

#ifndef _CALCS_CALCS_LOGGER_H_
#define _CALCS_CALCS_LOGGER_H_
-#define DC_LOGGER \
-	logger
+#define DC_LOGGER ctx->logger

-static void print_bw_calcs_dceip(struct dal_logger *logger, const struct bw_calcs_dceip *dceip)
+static void print_bw_calcs_dceip(struct dc_context *ctx, const struct bw_calcs_dceip *dceip)
{

	DC_LOG_BANDWIDTH_CALCS("#####################################################################");

@@ -122,7 +121,7 @@ static void print_bw_calcs_dceip(struct dal_logger *logger, const struct bw_calcs_dceip *dceip)

}

-static void print_bw_calcs_vbios(struct dal_logger *logger, const struct bw_calcs_vbios *vbios)
+static void print_bw_calcs_vbios(struct dc_context *ctx, const struct bw_calcs_vbios *vbios)
{

	DC_LOG_BANDWIDTH_CALCS("#####################################################################");

@@ -181,7 +180,7 @@ static void print_bw_calcs_vbios(struct dal_logger *logger, const struct bw_calcs_vbios *vbios)

}

-static void print_bw_calcs_data(struct dal_logger *logger, struct bw_calcs_data *data)
+static void print_bw_calcs_data(struct dc_context *ctx, struct bw_calcs_data *data)
{

	int i, j, k;

@@ -3010,9 +3010,9 @@ bool bw_calcs(struct dc_context *ctx,
	struct bw_fixed low_yclk = vbios->low_yclk;

	if (ctx->dc->debug.bandwidth_calcs_trace) {
-		print_bw_calcs_dceip(ctx->logger, dceip);
-		print_bw_calcs_vbios(ctx->logger, vbios);
-		print_bw_calcs_data(ctx->logger, data);
+		print_bw_calcs_dceip(ctx, dceip);
+		print_bw_calcs_vbios(ctx, vbios);
+		print_bw_calcs_data(ctx, data);
	}
	calculate_bandwidth(dceip, vbios, data);

@@ -250,7 +250,24 @@ static void pipe_ctx_to_e2e_pipe_params (
	else if (pipe->bottom_pipe != NULL && pipe->bottom_pipe->plane_state == pipe->plane_state)
		input->src.is_hsplit = true;

-	input->src.dcc = pipe->plane_state->dcc.enable;
+	if (pipe->plane_res.dpp->ctx->dc->debug.optimized_watermark) {
+		/*
+		 * this method requires us to always re-calculate watermark when dcc change
+		 * between flip.
+		 */
+		input->src.dcc = pipe->plane_state->dcc.enable ? 1 : 0;
+	} else {
+		/*
+		 * allow us to disable dcc on the fly without re-calculating WM
+		 *
+		 * extra overhead for DCC is quite small.  for 1080p WM without
+		 * DCC is only 0.417us lower (urgent goes from 6.979us to 6.562us)
+		 */
+		unsigned int bpe;
+
+		input->src.dcc = pipe->plane_res.dpp->ctx->dc->res_pool->hubbub->funcs->
+			dcc_support_pixel_format(pipe->plane_state->format, &bpe) ? 1 : 0;
+	}
	input->src.dcc_rate = 1;
	input->src.meta_pitch = pipe->plane_state->dcc.grph.meta_pitch;
	input->src.source_scan = dm_horz;
@@ -384,6 +384,71 @@ void dc_stream_set_static_screen_events(struct dc *dc,
	dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, events);
}

+void dc_link_set_drive_settings(struct dc *dc,
+		struct link_training_settings *lt_settings,
+		const struct dc_link *link)
+{
+
+	int i;
+
+	for (i = 0; i < dc->link_count; i++) {
+		if (dc->links[i] == link)
+			break;
+	}
+
+	if (i >= dc->link_count)
+		ASSERT_CRITICAL(false);
+
+	dc_link_dp_set_drive_settings(dc->links[i], lt_settings);
+}
+
+void dc_link_perform_link_training(struct dc *dc,
+		struct dc_link_settings *link_setting,
+		bool skip_video_pattern)
+{
+	int i;
+
+	for (i = 0; i < dc->link_count; i++)
+		dc_link_dp_perform_link_training(
+			dc->links[i],
+			link_setting,
+			skip_video_pattern);
+}
+
+void dc_link_set_preferred_link_settings(struct dc *dc,
+		struct dc_link_settings *link_setting,
+		struct dc_link *link)
+{
+	link->preferred_link_setting = *link_setting;
+	dp_retrain_link_dp_test(link, link_setting, false);
+}
+
+void dc_link_enable_hpd(const struct dc_link *link)
+{
+	dc_link_dp_enable_hpd(link);
+}
+
+void dc_link_disable_hpd(const struct dc_link *link)
+{
+	dc_link_dp_disable_hpd(link);
+}
+
+
+void dc_link_set_test_pattern(struct dc_link *link,
+		enum dp_test_pattern test_pattern,
+		const struct link_training_settings *p_link_settings,
+		const unsigned char *p_custom_pattern,
+		unsigned int cust_pattern_size)
+{
+	if (link != NULL)
+		dc_link_dp_set_test_pattern(
+			link,
+			test_pattern,
+			p_link_settings,
+			p_custom_pattern,
+			cust_pattern_size);
+}
+
static void destruct(struct dc *dc)
{
	dc_release_state(dc->current_state);
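A sketch of how a DM or debugfs caller might drive the new wrappers (illustrative only; the chosen lane count and rate are placeholders):

	struct dc_link_settings setting = {
		.lane_count = LANE_COUNT_FOUR,
		.link_rate = LINK_RATE_HIGH2,	/* HBR2, 5.4 Gbps per lane */
	};

	/* Pin a preferred setting on one link and retrain it... */
	dc_link_set_preferred_link_settings(dc, &setting, link);
	/* ...or retrain every link with the same setting. */
	dc_link_perform_link_training(dc, &setting, false);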
@@ -402,9 +467,6 @@ static void destruct(struct dc *dc)
	if (dc->ctx->created_bios)
		dal_bios_parser_destroy(&dc->ctx->dc_bios);

-	if (dc->ctx->logger)
-		dal_logger_destroy(&dc->ctx->logger);
-
	kfree(dc->ctx);
	dc->ctx = NULL;

@@ -414,7 +476,7 @@ static void destruct(struct dc *dc)
	kfree(dc->bw_dceip);
	dc->bw_dceip = NULL;

-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_X86
	kfree(dc->dcn_soc);
	dc->dcn_soc = NULL;

@@ -427,11 +489,10 @@ static void destruct(struct dc *dc)
static bool construct(struct dc *dc,
		const struct dc_init_data *init_params)
{
-	struct dal_logger *logger;
	struct dc_context *dc_ctx;
	struct bw_calcs_dceip *dc_dceip;
	struct bw_calcs_vbios *dc_vbios;
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_X86
	struct dcn_soc_bounding_box *dcn_soc;
	struct dcn_ip_params *dcn_ip;
#endif

@@ -453,7 +514,7 @@ static bool construct(struct dc *dc,
	}

	dc->bw_vbios = dc_vbios;
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_X86
	dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
	if (!dcn_soc) {
		dm_error("%s: failed to create dcn_soc\n", __func__);

@@ -492,14 +553,7 @@ static bool construct(struct dc *dc,
	}

-	/* Create logger */
-	logger = dal_logger_create(dc_ctx, init_params->log_mask);
-
-	if (!logger) {
-		/* can *not* call logger. call base driver 'print error' */
-		dm_error("%s: failed to create Logger!\n", __func__);
-		goto fail;
-	}
-	dc_ctx->logger = logger;
	dc_ctx->dce_environment = init_params->dce_environment;

	dc_version = resource_parse_asic_id(init_params->asic_id);

@@ -918,9 +972,7 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context)
	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

-		dc_stream_log(stream,
-				dc->ctx->logger,
-				LOG_DC);
+		dc_stream_log(dc, stream);
	}

	result = dc_commit_state_no_check(dc, context);

@@ -348,7 +348,7 @@ void context_clock_trace(
		struct dc *dc,
		struct dc_state *context)
{
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
	DC_LOGGER_INIT(dc->ctx->logger);
	CLOCK_TRACE("Current: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n"
			"dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n",

@@ -313,7 +313,7 @@ static enum signal_type get_basic_signal_type(
 * @brief
 * Check whether there is a dongle on DP connector
 */
-static bool is_dp_sink_present(struct dc_link *link)
+bool dc_link_is_dp_sink_present(struct dc_link *link)
{
	enum gpio_result gpio_result;
	uint32_t clock_pin = 0;

@@ -406,7 +406,7 @@ static enum signal_type link_detect_sink(
			 * we assume signal is DVI; it could be corrected
			 * to HDMI after dongle detection
			 */
-			if (!is_dp_sink_present(link))
+			if (!dm_helpers_is_dp_sink_present(link))
				result = SIGNAL_TYPE_DVI_SINGLE_LINK;
		}
	}

@@ -498,6 +498,10 @@ static bool detect_dp(
			sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
			link->type = dc_connection_mst_branch;

+			dal_ddc_service_set_transaction_type(
+							link->ddc,
+							sink_caps->transaction_type);
+
			/*
			 * This call will initiate MST topology discovery. Which
			 * will detect MST ports and add new DRM connector DRM

@@ -525,6 +529,10 @@ static bool detect_dp(
			if (reason == DETECT_REASON_BOOT)
				boot = true;

+			dm_helpers_dp_update_branch_info(
+				link->ctx,
+				link);
+
			if (!dm_helpers_dp_mst_start_top_mgr(
				link->ctx,
				link, boot)) {

@@ -33,6 +33,10 @@
#include "include/vector.h"
#include "core_types.h"
#include "dc_link_ddc.h"
+#include "i2caux/engine.h"
+#include "i2caux/i2c_engine.h"
+#include "i2caux/aux_engine.h"
+#include "i2caux/i2caux.h"

#define AUX_POWER_UP_WA_DELAY 500
#define I2C_OVER_AUX_DEFER_WA_DELAY 70
@@ -629,83 +633,62 @@ bool dal_ddc_service_query_ddc_data(
	return ret;
}

-enum ddc_result dal_ddc_service_read_dpcd_data(
-	struct ddc_service *ddc,
-	bool i2c,
-	enum i2c_mot_mode mot,
-	uint32_t address,
-	uint8_t *data,
-	uint32_t len,
-	uint32_t *read)
+int dc_link_aux_transfer(struct ddc_service *ddc,
+		unsigned int address,
+		uint8_t *reply,
+		void *buffer,
+		unsigned int size,
+		enum aux_transaction_type type,
+		enum i2caux_transaction_action action)
{
-	struct aux_payload read_payload = {
-		.i2c_over_aux = i2c,
-		.write = false,
-		.address = address,
-		.length = len,
-		.data = data,
-	};
-	struct aux_command command = {
-		.payloads = &read_payload,
-		.number_of_payloads = 1,
-		.defer_delay = 0,
-		.max_defer_write_retry = 0,
-		.mot = mot
-	};
+	struct i2caux *i2caux = ddc->ctx->i2caux;
+	struct ddc *ddc_pin = ddc->ddc_pin;
+	struct aux_engine *engine;
+	enum aux_channel_operation_result operation_result;
+	struct aux_request_transaction_data aux_req;
+	struct aux_reply_transaction_data aux_rep;
+	uint8_t returned_bytes = 0;
+	int res = -1;
+	uint32_t status;

-	*read = 0;
+	memset(&aux_req, 0, sizeof(aux_req));
+	memset(&aux_rep, 0, sizeof(aux_rep));

-	if (len > DEFAULT_AUX_MAX_DATA_SIZE) {
-		BREAK_TO_DEBUGGER();
-		return DDC_RESULT_FAILED_INVALID_OPERATION;
+	engine = i2caux->funcs->acquire_aux_engine(i2caux, ddc_pin);
+
+	aux_req.type = type;
+	aux_req.action = action;
+
+	aux_req.address = address;
+	aux_req.delay = 0;
+	aux_req.length = size;
+	aux_req.data = buffer;
+
+	engine->funcs->submit_channel_request(engine, &aux_req);
+	operation_result = engine->funcs->get_channel_status(engine, &returned_bytes);
+
+	switch (operation_result) {
+	case AUX_CHANNEL_OPERATION_SUCCEEDED:
+		res = returned_bytes;
+
+		if (res <= size && res >= 0)
+			res = engine->funcs->read_channel_reply(engine, size,
+								buffer, reply,
+								&status);
+
+		break;
+	case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
+		res = 0;
+		break;
+	case AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN:
+	case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
+	case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
+		res = -1;
+		break;
	}

-	if (dal_i2caux_submit_aux_command(
-		ddc->ctx->i2caux,
-		ddc->ddc_pin,
-		&command)) {
-		*read = command.payloads->length;
-		return DDC_RESULT_SUCESSFULL;
-	}
-
-	return DDC_RESULT_FAILED_OPERATION;
-}
-
-enum ddc_result dal_ddc_service_write_dpcd_data(
-	struct ddc_service *ddc,
-	bool i2c,
-	enum i2c_mot_mode mot,
-	uint32_t address,
-	const uint8_t *data,
-	uint32_t len)
-{
-	struct aux_payload write_payload = {
-		.i2c_over_aux = i2c,
-		.write = true,
-		.address = address,
-		.length = len,
-		.data = (uint8_t *)data,
-	};
-	struct aux_command command = {
-		.payloads = &write_payload,
-		.number_of_payloads = 1,
-		.defer_delay = 0,
-		.max_defer_write_retry = 0,
-		.mot = mot
-	};
-
-	if (len > DEFAULT_AUX_MAX_DATA_SIZE) {
-		BREAK_TO_DEBUGGER();
-		return DDC_RESULT_FAILED_INVALID_OPERATION;
-	}
-
-	if (dal_i2caux_submit_aux_command(
-		ddc->ctx->i2caux,
-		ddc->ddc_pin,
-		&command))
-		return DDC_RESULT_SUCESSFULL;
-
-	return DDC_RESULT_FAILED_OPERATION;
+	i2caux->funcs->release_engine(i2caux, &engine->base);
+	return res;
}

/*test only function*/
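A sketch of a native AUX read through the new dc_link_aux_transfer() entry point (illustrative only; the DPCD address is a placeholder and error handling is elided):

	uint8_t reply;
	uint8_t data;
	int ret = dc_link_aux_transfer(link->ddc,
			DP_SET_POWER,	/* DPCD 0x600 */
			&reply,
			&data,
			sizeof(data),
			AUX_TRANSACTION_TYPE_DP,
			I2CAUX_TRANSACTION_ACTION_DP_READ);

	/* ret < 0: channel failure; ret == 0: HPD disconnect; else bytes read */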
@@ -39,7 +39,7 @@ static bool decide_fallback_link_setting(
		struct dc_link_settings initial_link_settings,
		struct dc_link_settings *current_link_setting,
		enum link_training_result training_result);
-static struct dc_link_settings get_common_supported_link_settings (
+static struct dc_link_settings get_common_supported_link_settings(
		struct dc_link_settings link_setting_a,
		struct dc_link_settings link_setting_b);

@@ -94,8 +94,8 @@ static void dpcd_set_link_settings(
	uint8_t rate = (uint8_t)
		(lt_settings->link_settings.link_rate);

-	union down_spread_ctrl downspread = {{0}};
-	union lane_count_set lane_count_set = {{0}};
+	union down_spread_ctrl downspread = { {0} };
+	union lane_count_set lane_count_set = { {0} };
	uint8_t link_set_buffer[2];

	downspread.raw = (uint8_t)

@@ -165,11 +165,11 @@ static void dpcd_set_lt_pattern_and_lane_settings(
	const struct link_training_settings *lt_settings,
	enum hw_dp_training_pattern pattern)
{
-	union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = {{{0}}};
+	union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = { { {0} } };
	const uint32_t dpcd_base_lt_offset =
		DP_TRAINING_PATTERN_SET;
	uint8_t dpcd_lt_buffer[5] = {0};
-	union dpcd_training_pattern dpcd_pattern = {{0}};
+	union dpcd_training_pattern dpcd_pattern = { {0} };
	uint32_t lane;
	uint32_t size_in_bytes;
	bool edp_workaround = false; /* TODO link_prop.INTERNAL */

@@ -233,7 +233,7 @@ static void dpcd_set_lt_pattern_and_lane_settings(
			link,
			DP_TRAINING_PATTERN_SET,
			&dpcd_pattern.raw,
-			sizeof(dpcd_pattern.raw) );
+			sizeof(dpcd_pattern.raw));

		core_link_write_dpcd(
				link,

@@ -247,7 +247,7 @@ static void dpcd_set_lt_pattern_and_lane_settings(
			link,
			dpcd_base_lt_offset,
			dpcd_lt_buffer,
-			size_in_bytes + sizeof(dpcd_pattern.raw) );
+			size_in_bytes + sizeof(dpcd_pattern.raw));

	link->cur_lane_setting = lt_settings->lane_settings[0];
}

@@ -429,8 +429,8 @@ static void get_lane_status_and_drive_settings(
	struct link_training_settings *req_settings)
{
	uint8_t dpcd_buf[6] = {0};
-	union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {{{0}}};
-	struct link_training_settings request_settings = {{0}};
+	union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } };
+	struct link_training_settings request_settings = { {0} };
	uint32_t lane;

	memset(req_settings, '\0', sizeof(struct link_training_settings));

@@ -652,7 +652,7 @@ static bool perform_post_lt_adj_req_sequence(

			if (req_drv_setting_changed) {
				update_drive_settings(
-					lt_settings,req_settings);
+					lt_settings, req_settings);

				dc_link_dp_set_drive_settings(link,
						lt_settings);

@@ -725,8 +725,8 @@ static enum link_training_result perform_channel_equalization_sequence(
	enum hw_dp_training_pattern hw_tr_pattern;
	uint32_t retries_ch_eq;
	enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
-	union lane_align_status_updated dpcd_lane_status_updated = {{0}};
-	union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {{{0}}};
+	union lane_align_status_updated dpcd_lane_status_updated = { {0} };
+	union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = { { {0} } };

	hw_tr_pattern = get_supported_tp(link);

@@ -1028,6 +1028,9 @@ enum link_training_result dc_link_dp_perform_link_training(
		lt_settings.lane_settings[0].VOLTAGE_SWING,
		lt_settings.lane_settings[0].PRE_EMPHASIS);

+	if (status != LINK_TRAINING_SUCCESS)
+		link->ctx->dc->debug.debug_data.ltFailCount++;
+
	return status;
}

@@ -1183,7 +1186,7 @@ bool dp_hbr_verify_link_cap(
	return success;
}

-static struct dc_link_settings get_common_supported_link_settings (
+static struct dc_link_settings get_common_supported_link_settings(
	struct dc_link_settings link_setting_a,
	struct dc_link_settings link_setting_b)
{

@@ -1429,6 +1432,7 @@ static uint32_t bandwidth_in_kbps_from_link_settings(

	uint32_t lane_count = link_setting->lane_count;
	uint32_t kbps = link_rate_in_kbps;
+
	kbps *= lane_count;
	kbps *= 8;   /* 8 bits per byte*/
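The hunk above only adds whitespace, but the surrounding arithmetic is worth a worked example (illustrative only, assuming the 27 MHz link-rate reference this file multiplies the DPCD link-rate code by):

	/* HBR2 (link_rate 0x14 = 20) on 4 lanes:
	 *   link_rate_in_kbps = 20 * 27000 = 540000   (kilosymbols/s per lane,
	 *                                               one byte per symbol)
	 *   kbps *= lane_count  ->  540000 * 4 = 2160000
	 *   kbps *= 8           ->  17280000 kbps = 17.28 Gbps,
	 * i.e. the usable rate of 4x 5.4 Gbps lanes after 8b/10b coding.
	 */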
@@ -1446,9 +1450,9 @@ bool dp_validate_mode_timing(
	const struct dc_link_settings *link_setting;

	/*always DP fail safe mode*/
-	if (timing->pix_clk_khz == (uint32_t)25175 &&
-		timing->h_addressable == (uint32_t)640 &&
-		timing->v_addressable == (uint32_t)480)
+	if (timing->pix_clk_khz == (uint32_t) 25175 &&
+		timing->h_addressable == (uint32_t) 640 &&
+		timing->v_addressable == (uint32_t) 480)
		return true;

	/* We always use verified link settings */

@@ -1996,12 +2000,16 @@ static void handle_automated_test(struct dc_link *link)
			sizeof(test_response));
}

-bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_irq_dpcd_data)
+bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_irq_dpcd_data, bool *out_link_loss)
{
-	union hpd_irq_data hpd_irq_dpcd_data = {{{{0}}}};
+	union hpd_irq_data hpd_irq_dpcd_data = { { { {0} } } };
	union device_service_irq device_service_clear = { { 0 } };
	enum dc_status result;

	bool status = false;

+	if (out_link_loss)
+		*out_link_loss = false;
	/* For use cases related to down stream connection status change,
	 * PSR and device auto test, refer to function handle_sst_hpd_irq
	 * in DAL2.1*/

@@ -2076,6 +2084,8 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_irq_dpcd_data, bool *out_link_loss)
			true, LINK_TRAINING_ATTEMPTS);

		status = false;
+		if (out_link_loss)
+			*out_link_loss = true;
	}

	if (link->type == dc_connection_active_dongle &&
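A sketch of how a DM-side handler might consume the new out_link_loss parameter (illustrative only; the retrain policy is the caller's):

	union hpd_irq_data irq_data;
	bool link_loss = false;

	if (dc_link_handle_hpd_rx_irq(link, &irq_data, &link_loss) && link_loss) {
		/* link dropped while streams were active: decide here whether
		 * to retrain and re-enable the affected streams */
	}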
@@ -2262,6 +2272,11 @@ static void get_active_converter_info(

			link->dpcd_caps.branch_hw_revision =
				dp_hw_fw_revision.ieee_hw_rev;
+
+			memmove(
+				link->dpcd_caps.branch_fw_revision,
+				dp_hw_fw_revision.ieee_fw_rev,
+				sizeof(dp_hw_fw_revision.ieee_fw_rev));
		}
	}

@@ -2317,6 +2332,7 @@ static bool retrieve_link_cap(struct dc_link *link)
	enum dc_status status = DC_ERROR_UNEXPECTED;
	uint32_t read_dpcd_retry_cnt = 3;
	int i;
+	struct dp_sink_hw_fw_revision dp_hw_fw_revision;

	memset(dpcd_data, '\0', sizeof(dpcd_data));
	memset(&down_strm_port_count,

@@ -2408,6 +2424,25 @@ static bool retrieve_link_cap(struct dc_link *link)
		(sink_id.ieee_oui[1] << 8) +
		(sink_id.ieee_oui[2]);

+	memmove(
+		link->dpcd_caps.sink_dev_id_str,
+		sink_id.ieee_device_id,
+		sizeof(sink_id.ieee_device_id));
+
+	core_link_read_dpcd(
+		link,
+		DP_SINK_HW_REVISION_START,
+		(uint8_t *)&dp_hw_fw_revision,
+		sizeof(dp_hw_fw_revision));
+
+	link->dpcd_caps.sink_hw_revision =
+		dp_hw_fw_revision.ieee_hw_rev;
+
+	memmove(
+		link->dpcd_caps.sink_fw_revision,
+		dp_hw_fw_revision.ieee_fw_rev,
+		sizeof(dp_hw_fw_revision.ieee_fw_rev));
+
	/* Connectivity log: detection */
	CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: ");

@@ -41,7 +41,7 @@
#include "dce100/dce100_resource.h"
#include "dce110/dce110_resource.h"
#include "dce112/dce112_resource.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
#include "dcn10/dcn10_resource.h"
#endif
#include "dce120/dce120_resource.h"

@@ -85,7 +85,7 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
	case FAMILY_AI:
		dc_version = DCE_VERSION_12_0;
		break;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
	case FAMILY_RV:
		dc_version = DCN_VERSION_1_0;
		break;

@@ -136,7 +136,7 @@ struct resource_pool *dc_create_resource_pool(
				num_virtual_links, dc);
		break;

-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
	case DCN_VERSION_1_0:
		res_pool = dcn10_create_resource_pool(
				num_virtual_links, dc);

@@ -1213,7 +1213,7 @@ static struct pipe_ctx *acquire_free_pipe_for_stream(

}

-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
static int acquire_first_split_pipe(
		struct resource_context *res_ctx,
		const struct resource_pool *pool,

@@ -1284,7 +1284,7 @@ bool dc_add_plane_to_context(

	free_pipe = acquire_free_pipe_for_stream(context, pool, stream);

-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
	if (!free_pipe) {
		int pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream);
		if (pipe_idx >= 0)

@@ -1705,8 +1705,8 @@ enum dc_status dc_add_stream_to_ctx(
	struct dc_context *dc_ctx = dc->ctx;
	enum dc_status res;

-	if (new_ctx->stream_count >= dc->res_pool->pipe_count) {
-		DC_ERROR("Max streams reached, can add stream %p !\n", stream);
+	if (new_ctx->stream_count >= dc->res_pool->timing_generator_count) {
+		DC_ERROR("Max streams reached, can't add stream %p !\n", stream);
		return DC_ERROR_UNEXPECTED;
	}

@@ -1882,7 +1882,7 @@ enum dc_status resource_map_pool_resources(
	/* acquire new resources */
	pipe_idx = acquire_first_free_pipe(&context->res_ctx, pool, stream);

-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_X86
	if (pipe_idx < 0)
		pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream);
#endif

@@ -30,6 +30,8 @@
#include "ipp.h"
#include "timing_generator.h"

+#define DC_LOGGER dc->ctx->logger
+
/*******************************************************************************
 * Private functions
 ******************************************************************************/

@@ -212,6 +214,8 @@ bool dc_stream_set_cursor_attributes(
		}

		core_dc->hwss.set_cursor_attribute(pipe_ctx);
+		if (core_dc->hwss.set_cursor_sdr_white_level)
+			core_dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
	}

	if (pipe_to_program)

@@ -317,16 +321,10 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
	return ret;
}


-void dc_stream_log(
-	const struct dc_stream_state *stream,
-	struct dal_logger *dm_logger,
-	enum dc_log_type log_type)
+void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream)
{
-
-	dm_logger_write(dm_logger,
-			log_type,
-			"core_stream 0x%x: src: %d, %d, %d, %d; dst: %d, %d, %d, %d, colorSpace:%d\n",
+	DC_LOG_DC(
+			"core_stream 0x%p: src: %d, %d, %d, %d; dst: %d, %d, %d, %d, colorSpace:%d\n",
			stream,
			stream->src.x,
			stream->src.y,

@@ -337,21 +335,18 @@ void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream)
			stream->dst.width,
			stream->dst.height,
			stream->output_color_space);
-	dm_logger_write(dm_logger,
-			log_type,
+	DC_LOG_DC(
			"\tpix_clk_khz: %d, h_total: %d, v_total: %d, pixelencoder:%d, displaycolorDepth:%d\n",
			stream->timing.pix_clk_khz,
			stream->timing.h_total,
			stream->timing.v_total,
			stream->timing.pixel_encoding,
			stream->timing.display_color_depth);
-	dm_logger_write(dm_logger,
-			log_type,
+	DC_LOG_DC(
			"\tsink name: %s, serial: %d\n",
			stream->sink->edid_caps.display_name,
			stream->sink->edid_caps.serial_number);
-	dm_logger_write(dm_logger,
-			log_type,
+	DC_LOG_DC(
			"\tlink: %d\n",
			stream->sink->link->link_index);
}

@@ -38,7 +38,7 @@
#include "inc/compressor.h"
#include "dml/display_mode_lib.h"

-#define DC_VER "3.1.52"
+#define DC_VER "3.1.56"

#define MAX_SURFACES 3
#define MAX_STREAMS 6
@@ -169,6 +169,12 @@ struct dc_config {
	bool disable_disp_pll_sharing;
};

+enum visual_confirm {
+	VISUAL_CONFIRM_DISABLE = 0,
+	VISUAL_CONFIRM_SURFACE = 1,
+	VISUAL_CONFIRM_HDR = 2,
+};
+
enum dcc_option {
	DCC_ENABLE = 0,
	DCC_DISABLE = 1,
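Since the old surface_visual_confirm boolean becomes a tri-state below, a sketch of the new check (illustrative only):

	/* The boolean test becomes an enum comparison. */
	static bool visual_confirm_enabled(const struct dc *dc)
	{
		return dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE;
	}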
@@ -202,7 +208,7 @@ struct dc_clocks {
};

struct dc_debug {
-	bool surface_visual_confirm;
+	enum visual_confirm visual_confirm;
	bool sanity_checks;
	bool max_disp_clk;
	bool surface_trace;

@@ -249,7 +255,15 @@ struct dc_debug {
	bool always_use_regamma;
	bool p010_mpo_support;
	bool recovery_enabled;
+	bool avoid_vbios_exec_table;
+	bool scl_reset_length10;
+	bool hdmi20_disable;

+	struct {
+		uint32_t ltFailCount;
+		uint32_t i2cErrorCount;
+		uint32_t auxErrorCount;
+	} debug_data;
};
struct dc_state;
struct resource_pool;

@@ -275,7 +289,7 @@ struct dc {
	/* Inputs into BW and WM calculations. */
	struct bw_calcs_dceip *bw_dceip;
	struct bw_calcs_vbios *bw_vbios;
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_X86
	struct dcn_soc_bounding_box *dcn_soc;
	struct dcn_ip_params *dcn_ip;
	struct display_mode_lib dml;

@@ -384,7 +398,8 @@ enum dc_transfer_func_predefined {
	TRANSFER_FUNCTION_LINEAR,
	TRANSFER_FUNCTION_UNITY,
	TRANSFER_FUNCTION_HLG,
-	TRANSFER_FUNCTION_HLG12
+	TRANSFER_FUNCTION_HLG12,
+	TRANSFER_FUNCTION_GAMMA22
};

struct dc_transfer_func {

@@ -627,9 +642,14 @@ struct dpcd_caps {
	struct dc_dongle_caps dongle_caps;

	uint32_t sink_dev_id;
+	int8_t sink_dev_id_str[6];
+	int8_t sink_hw_revision;
+	int8_t sink_fw_revision[2];
+
	uint32_t branch_dev_id;
	int8_t branch_dev_name[6];
	int8_t branch_hw_revision;
+	int8_t branch_fw_revision[2];

	bool allow_invalid_MSA_timing_param;
	bool panel_mode_edp;
@@ -255,3 +255,54 @@ uint32_t generic_reg_wait(const struct dc_context *ctx,

	return reg_val;
}
+
+void generic_write_indirect_reg(const struct dc_context *ctx,
+		uint32_t addr_index, uint32_t addr_data,
+		uint32_t index, uint32_t data)
+{
+	dm_write_reg(ctx, addr_index, index);
+	dm_write_reg(ctx, addr_data, data);
+}
+
+uint32_t generic_read_indirect_reg(const struct dc_context *ctx,
+		uint32_t addr_index, uint32_t addr_data,
+		uint32_t index)
+{
+	uint32_t value = 0;
+
+	dm_write_reg(ctx, addr_index, index);
+	value = dm_read_reg(ctx, addr_data);
+
+	return value;
+}
+
+
+uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx,
+		uint32_t addr_index, uint32_t addr_data,
+		uint32_t index, uint32_t reg_val, int n,
+		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
+		...)
+{
+	uint32_t shift, mask, field_value;
+	int i = 1;
+
+	va_list ap;
+
+	va_start(ap, field_value1);
+
+	reg_val = set_reg_field_value_ex(reg_val, field_value1, mask1, shift1);
+
+	while (i < n) {
+		shift = va_arg(ap, uint32_t);
+		mask = va_arg(ap, uint32_t);
+		field_value = va_arg(ap, uint32_t);
+
+		reg_val = set_reg_field_value_ex(reg_val, field_value, mask, shift);
+		i++;
+	}
+
+	generic_write_indirect_reg(ctx, addr_index, addr_data, index, reg_val);
+	va_end(ap);
+
+	return reg_val;
+}
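A sketch of the varargs contract of generic_indirect_reg_update_ex(): after the named shift1/mask1/field_value1 triple, each further field is three more uint32_t arguments, with n counting the triples (illustrative only; the register and field macros are placeholders):

	uint32_t val = generic_read_indirect_reg(ctx, ADDR_INDEX, ADDR_DATA, REG_IDX);

	/* read-modify-write two fields of one indirect register */
	val = generic_indirect_reg_update_ex(ctx, ADDR_INDEX, ADDR_DATA, REG_IDX,
			val, 2,
			FIELD_A_SHIFT, FIELD_A_MASK, 1,
			FIELD_B_SHIFT, FIELD_B_MASK, 0);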
@@ -404,9 +404,11 @@ struct dc_cursor_position {
struct dc_cursor_mi_param {
	unsigned int pixel_clk_khz;
	unsigned int ref_clk_khz;
-	unsigned int viewport_x_start;
-	unsigned int viewport_width;
+	struct rect viewport;
	struct fixed31_32 h_scale_ratio;
+	struct fixed31_32 v_scale_ratio;
+	enum dc_rotation_angle rotation;
+	bool mirror;
};

/* IPP related types */

@@ -490,6 +492,7 @@ struct dc_cursor_attributes {
	uint32_t height;

	enum dc_cursor_color_format color_format;
+	uint32_t sdr_white_level; // for boosting (SDR) cursor in HDR mode

	/* In case we support HW Cursor rotation in the future */
	enum dc_rotation_angle rotation_angle;

@@ -497,6 +500,11 @@ struct dc_cursor_attributes {
	union dc_cursor_attribute_flags attribute_flags;
};

+struct dpp_cursor_attributes {
+	int bias;
+	int scale;
+};
+
/* OPP */

enum dc_color_space {

@@ -172,7 +172,7 @@ bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason);
 * false - no change in Downstream port status. No further action required
 * from DM. */
bool dc_link_handle_hpd_rx_irq(struct dc_link *dc_link,
-		union hpd_irq_data *hpd_irq_dpcd_data);
+		union hpd_irq_data *hpd_irq_dpcd_data, bool *out_link_loss);

struct dc_sink_init_data;

@@ -210,10 +210,29 @@ bool dc_link_dp_set_test_pattern(

void dc_link_enable_hpd_filter(struct dc_link *link, bool enable);

+bool dc_link_is_dp_sink_present(struct dc_link *link);
+
/*
 * DPCD access interfaces
 */

+void dc_link_set_drive_settings(struct dc *dc,
+		struct link_training_settings *lt_settings,
+		const struct dc_link *link);
+void dc_link_perform_link_training(struct dc *dc,
+		struct dc_link_settings *link_setting,
+		bool skip_video_pattern);
+void dc_link_set_preferred_link_settings(struct dc *dc,
+		struct dc_link_settings *link_setting,
+		struct dc_link *link);
+void dc_link_enable_hpd(const struct dc_link *link);
+void dc_link_disable_hpd(const struct dc_link *link);
+void dc_link_set_test_pattern(struct dc_link *link,
+		enum dp_test_pattern test_pattern,
+		const struct link_training_settings *p_link_settings,
+		const unsigned char *p_custom_pattern,
+		unsigned int cust_pattern_size);
+
bool dc_submit_i2c(
		struct dc *dc,
		uint32_t link_index,

@@ -100,6 +100,7 @@ struct dc_stream_state {

	struct dc_cursor_attributes cursor_attributes;
	struct dc_cursor_position cursor_position;
+	uint32_t sdr_white_level; // for boosting (SDR) cursor in HDR mode

	/* from stream struct */
	struct kref refcount;

@@ -147,10 +148,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
/*
 * Log the current stream state.
 */
-void dc_stream_log(
-	const struct dc_stream_state *stream,
-	struct dal_logger *dc_logger,
-	enum dc_log_type log_type);
+void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream);

uint8_t dc_get_current_stream_count(struct dc *dc);
struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i);

@@ -258,6 +256,7 @@ bool dc_stream_set_cursor_position(
	struct dc_stream_state *stream,
	const struct dc_cursor_position *position);

+
bool dc_stream_adjust_vmin_vmax(struct dc *dc,
		struct dc_stream_state **stream,
		int num_streams,

@@ -77,8 +77,6 @@ struct dc_context {
	struct dc *dc;

	void *driver_context; /* e.g. amdgpu_device */
-
-	struct dal_logger *logger;
	void *cgs_device;

	enum dce_environment dce_environment;

@@ -194,6 +192,7 @@ union display_content_support {

struct dc_panel_patch {
	unsigned int dppowerup_delay;
+	unsigned int extra_t12_ms;
};

struct dc_edid_caps {

@@ -592,7 +592,7 @@ static uint32_t dce110_get_pix_clk_dividers(
	case DCE_VERSION_11_2:
	case DCE_VERSION_11_22:
	case DCE_VERSION_12_0:
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
	case DCN_VERSION_1_0:
#endif

@@ -909,7 +909,7 @@ static bool dce110_program_pix_clk(
	struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source);
	struct bp_pixel_clock_parameters bp_pc_params = {0};

-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
	if (IS_FPGA_MAXIMUS_DC(clock_source->ctx->dce_environment)) {
		unsigned int inst = pix_clk_params->controller_id - CONTROLLER_ID_D0;
		unsigned dp_dto_ref_kHz = 700000;

@@ -982,7 +982,7 @@ static bool dce110_program_pix_clk(
	case DCE_VERSION_11_2:
	case DCE_VERSION_11_22:
	case DCE_VERSION_12_0:
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
	case DCN_VERSION_1_0:
#endif

@@ -55,7 +55,7 @@
	CS_SF(PHYPLLA_PIXCLK_RESYNC_CNTL, PHYPLLA_DCCG_DEEP_COLOR_CNTL, mask_sh),\
	CS_SF(PHYPLLA_PIXCLK_RESYNC_CNTL, PHYPLLA_PIXCLK_DOUBLE_RATE_ENABLE, mask_sh)

-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86

#define CS_COMMON_REG_LIST_DCN1_0(index, pllid) \
		SRI(PIXCLK_RESYNC_CNTL, PHYPLL, pllid),\

@@ -30,7 +30,7 @@
#include "bios_parser_interface.h"
#include "dc.h"
#include "dmcu.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
#include "dcn_calcs.h"
#endif
#include "core_types.h"

@@ -478,7 +478,7 @@ static void dce12_update_clocks(struct dccg *dccg,
	}
}

-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_X86
static int dcn1_determine_dppclk_threshold(struct dccg *dccg, struct dc_clocks *new_clocks)
{
	bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;

@@ -666,7 +666,7 @@ static void dce_update_clocks(struct dccg *dccg,
	}
}

-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_X86
static const struct display_clock_funcs dcn1_funcs = {
	.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
	.set_dispclk = dce112_set_clock,

@@ -821,7 +821,7 @@ struct dccg *dce120_dccg_create(struct dc_context *ctx)
	return &clk_dce->base;
}

-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_X86
struct dccg *dcn1_dccg_create(struct dc_context *ctx)
{
	struct dc_debug *debug = &ctx->dc->debug;

@@ -44,18 +44,14 @@
	CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, mask_sh)

#define CLK_COMMON_MASK_SH_LIST_DCN_COMMON_BASE(mask_sh) \
	CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_WDIVIDER, mask_sh),\
	CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, mask_sh),\
-	CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, mask_sh),\
-	CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_CHG_DONE, mask_sh)
+	CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, mask_sh)

#define CLK_REG_FIELD_LIST(type) \
	type DPREFCLK_SRC_SEL; \
	type DENTIST_DPREFCLK_WDIVIDER; \
	type DENTIST_DISPCLK_WDIVIDER; \
	type DENTIST_DPPCLK_WDIVIDER; \
-	type DENTIST_DISPCLK_CHG_DONE; \
-	type DENTIST_DPPCLK_CHG_DONE;
+	type DENTIST_DISPCLK_CHG_DONE;

struct dccg_shift {
	CLK_REG_FIELD_LIST(uint8_t)

@@ -115,7 +111,7 @@ struct dccg *dce112_dccg_create(

struct dccg *dce120_dccg_create(struct dc_context *ctx);

-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_X86
struct dccg *dcn1_dccg_create(struct dc_context *ctx);
#endif

@@ -314,7 +314,7 @@ static void dce_get_psr_wait_loop(
	return;
}

-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
static void dcn10_get_dmcu_state(struct dmcu *dmcu)
{
	struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);

@@ -735,7 +735,7 @@ static const struct dmcu_funcs dce_funcs = {
	.is_dmcu_initialized = dce_is_dmcu_initialized
};

-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
static const struct dmcu_funcs dcn10_funcs = {
	.dmcu_init = dcn10_dmcu_init,
	.load_iram = dcn10_dmcu_load_iram,

@@ -787,7 +787,7 @@ struct dmcu *dce_dmcu_create(
	return &dmcu_dce->base;
}

-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
struct dmcu *dcn10_dmcu_create(
	struct dc_context *ctx,
	const struct dce_dmcu_registers *regs,

@@ -147,6 +147,7 @@
	SR(DCCG_GATE_DISABLE_CNTL2), \
-	SR(DCFCLK_CNTL),\
+	SR(DCFCLK_CNTL), \
+	SR(DC_MEM_GLOBAL_PWR_REQ_CNTL), \
	/* todo: get these from GVM instead of reading registers ourselves */\
	MMHUB_SR(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32),\
	MMHUB_SR(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32),\

@@ -275,6 +276,8 @@ struct dce_hwseq_registers {
	uint32_t MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB;
	uint32_t MC_VM_SYSTEM_APERTURE_LOW_ADDR;
	uint32_t MC_VM_SYSTEM_APERTURE_HIGH_ADDR;
+	uint32_t AZALIA_AUDIO_DTO;
+	uint32_t AZALIA_CONTROLLER_CLOCK_GATING;
};
/* set field name */
#define HWS_SF(blk_name, reg_name, field_name, post_fix)\

@@ -361,7 +364,8 @@ struct dce_hwseq_registers {
	HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, OTG0_),\
	HWS_SF1(OTG0_, PHYPLL_PIXEL_RATE_CNTL, PHYPLL_PIXEL_RATE_SOURCE, mask_sh), \
	HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, mask_sh), \
-	HWS_SF(, DCFCLK_CNTL, DCFCLK_GATE_DIS, mask_sh)
+	HWS_SF(, DCFCLK_CNTL, DCFCLK_GATE_DIS, mask_sh), \
+	HWS_SF(, DC_MEM_GLOBAL_PWR_REQ_CNTL, DC_MEM_GLOBAL_PWR_REQ_DIS, mask_sh)

#define HWSEQ_DCN1_MASK_SH_LIST(mask_sh)\
	HWSEQ_DCN_MASK_SH_LIST(mask_sh), \

@@ -500,7 +504,8 @@ struct dce_hwseq_registers {
	type D1VGA_MODE_ENABLE; \
	type D2VGA_MODE_ENABLE; \
	type D3VGA_MODE_ENABLE; \
-	type D4VGA_MODE_ENABLE;
+	type D4VGA_MODE_ENABLE; \
+	type AZALIA_AUDIO_DTO_MODULE;

struct dce_hwseq_shift {
	HWSEQ_REG_FIELD_LIST(uint8_t)

@@ -646,6 +646,9 @@ static bool dce110_link_encoder_validate_hdmi_output(
	if (!enc110->base.features.flags.bits.HDMI_6GB_EN &&
		adjusted_pix_clk_khz >= 300000)
		return false;
+	if (enc110->base.ctx->dc->debug.hdmi20_disable &&
+		crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+		return false;
	return true;
}

@@ -773,6 +776,9 @@ void dce110_link_encoder_construct(
				__func__,
				result);
	}
+	if (enc110->base.ctx->dc->debug.hdmi20_disable) {
+		enc110->base.features.flags.bits.HDMI_6GB_EN = 0;
+	}
}

bool dce110_link_encoder_validate_output_with_stream(

@@ -729,7 +729,7 @@ static bool dce_mi_program_surface_flip_and_addr(
	return true;
}

-static struct mem_input_funcs dce_mi_funcs = {
+static const struct mem_input_funcs dce_mi_funcs = {
	.mem_input_program_display_marks = dce_mi_program_display_marks,
	.allocate_mem_input = dce_mi_allocate_dmif,
	.free_mem_input = dce_mi_free_dmif,

@@ -741,6 +741,29 @@ static const struct mem_input_funcs dce_mi_funcs = {
	.mem_input_is_flip_pending = dce_mi_is_flip_pending
};

+static const struct mem_input_funcs dce112_mi_funcs = {
+	.mem_input_program_display_marks = dce112_mi_program_display_marks,
+	.allocate_mem_input = dce_mi_allocate_dmif,
+	.free_mem_input = dce_mi_free_dmif,
+	.mem_input_program_surface_flip_and_addr =
+			dce_mi_program_surface_flip_and_addr,
+	.mem_input_program_pte_vm = dce_mi_program_pte_vm,
+	.mem_input_program_surface_config =
+			dce_mi_program_surface_config,
+	.mem_input_is_flip_pending = dce_mi_is_flip_pending
+};
+
+static const struct mem_input_funcs dce120_mi_funcs = {
+	.mem_input_program_display_marks = dce120_mi_program_display_marks,
+	.allocate_mem_input = dce_mi_allocate_dmif,
+	.free_mem_input = dce_mi_free_dmif,
+	.mem_input_program_surface_flip_and_addr =
+			dce_mi_program_surface_flip_and_addr,
+	.mem_input_program_pte_vm = dce_mi_program_pte_vm,
+	.mem_input_program_surface_config =
+			dce_mi_program_surface_config,
+	.mem_input_is_flip_pending = dce_mi_is_flip_pending
+};
+
void dce_mem_input_construct(
	struct dce_mem_input *dce_mi,

@@ -769,7 +792,7 @@ void dce112_mem_input_construct(
	const struct dce_mem_input_mask *mi_mask)
{
	dce_mem_input_construct(dce_mi, ctx, inst, regs, mi_shift, mi_mask);
-	dce_mi->base.funcs->mem_input_program_display_marks = dce112_mi_program_display_marks;
+	dce_mi->base.funcs = &dce112_mi_funcs;
}

void dce120_mem_input_construct(

@@ -781,5 +804,5 @@ void dce120_mem_input_construct(
	const struct dce_mem_input_mask *mi_mask)
{
	dce_mem_input_construct(dce_mi, ctx, inst, regs, mi_shift, mi_mask);
-	dce_mi->base.funcs->mem_input_program_display_marks = dce120_mi_program_display_marks;
+	dce_mi->base.funcs = &dce120_mi_funcs;
}

@@ -135,7 +135,7 @@ static void dce110_update_generic_info_packet(
			AFMT_GENERIC0_UPDATE, (packet_index == 0),
			AFMT_GENERIC2_UPDATE, (packet_index == 2));
	}
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
	if (REG(AFMT_VBI_PACKET_CONTROL1)) {
		switch (packet_index) {
		case 0:

@@ -229,7 +229,7 @@ static void dce110_update_hdmi_info_packet(
				HDMI_GENERIC1_SEND, send,
				HDMI_GENERIC1_LINE, line);
		break;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
	case 4:
		if (REG(HDMI_GENERIC_PACKET_CONTROL2))
			REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL2,

@@ -274,7 +274,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
	struct dc_crtc_timing *crtc_timing,
	enum dc_color_space output_color_space)
{
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
	uint32_t h_active_start;
	uint32_t v_active_start;
	uint32_t misc0 = 0;

@@ -317,7 +317,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
	if (enc110->se_mask->DP_VID_M_DOUBLE_VALUE_EN)
		REG_UPDATE(DP_VID_TIMING, DP_VID_M_DOUBLE_VALUE_EN, 1);

-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
	if (enc110->se_mask->DP_VID_N_MUL)
		REG_UPDATE(DP_VID_TIMING, DP_VID_N_MUL, 1);
#endif

@@ -328,7 +328,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
		break;
	}

-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
	if (REG(DP_MSA_MISC))
		misc1 = REG_READ(DP_MSA_MISC);
#endif

@@ -362,7 +362,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
	/* set dynamic range and YCbCr range */


-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
	switch (crtc_timing->display_color_depth) {
	case COLOR_DEPTH_666:
		colorimetry_bpc = 0;

@@ -441,7 +441,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
			DP_DYN_RANGE, dynamic_range_rgb,
			DP_YCBCR_RANGE, dynamic_range_ycbcr);

-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
	if (REG(DP_MSA_COLORIMETRY))
		REG_SET(DP_MSA_COLORIMETRY, 0, DP_MSA_MISC0, misc0);

@@ -476,7 +476,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
			crtc_timing->v_front_porch;


-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
	/* start at begining of left border */
	if (REG(DP_MSA_TIMING_PARAM2))
		REG_SET_2(DP_MSA_TIMING_PARAM2, 0,

@@ -751,7 +751,7 @@ static void dce110_stream_encoder_update_hdmi_info_packets(
		dce110_update_hdmi_info_packet(enc110, 3, &info_frame->hdrsmd);
	}

-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
	if (enc110->se_mask->HDMI_DB_DISABLE) {
		/* for bring up, disable dp double  TODO */
		if (REG(HDMI_DB_CONTROL))

@@ -789,7 +789,7 @@ static void dce110_stream_encoder_stop_hdmi_info_packets(
		HDMI_GENERIC1_LINE, 0,
		HDMI_GENERIC1_SEND, 0);

-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
	/* stop generic packets 2 & 3 on HDMI */
	if (REG(HDMI_GENERIC_PACKET_CONTROL2))
		REG_SET_6(HDMI_GENERIC_PACKET_CONTROL2, 0,

@@ -155,7 +155,7 @@ static void program_overscan(
	int overscan_bottom = data->v_active
		- data->recout.y - data->recout.height;

-	if (xfm_dce->base.ctx->dc->debug.surface_visual_confirm) {
+	if (xfm_dce->base.ctx->dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) {
		overscan_bottom += 2;
		overscan_right += 2;
	}

@@ -864,17 +864,22 @@ void hwss_edp_power_control(
	if (power_up) {
		unsigned long long current_ts = dm_get_timestamp(ctx);
		unsigned long long duration_in_ms =
-			dm_get_elapse_time_in_ns(
+			div64_u64(dm_get_elapse_time_in_ns(
					ctx,
					current_ts,
-					div64_u64(link->link_trace.time_stamp.edp_poweroff, 1000000));
+					link->link_trace.time_stamp.edp_poweroff), 1000000);
		unsigned long long wait_time_ms = 0;

		/* max 500ms from LCDVDD off to on */
+		unsigned long long edp_poweroff_time_ms = 500;
+
+		if (link->local_sink != NULL)
+			edp_poweroff_time_ms =
+				500 + link->local_sink->edid_caps.panel_patch.extra_t12_ms;
		if (link->link_trace.time_stamp.edp_poweroff == 0)
-			wait_time_ms = 500;
-		else if (duration_in_ms < 500)
-			wait_time_ms = 500 - duration_in_ms;
+			wait_time_ms = edp_poweroff_time_ms;
+		else if (duration_in_ms < edp_poweroff_time_ms)
+			wait_time_ms = edp_poweroff_time_ms - duration_in_ms;

		if (wait_time_ms) {
			msleep(wait_time_ms);
|
|||
{
|
||||
struct tg_color color = {0};
|
||||
|
||||
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
|
||||
#ifdef CONFIG_X86
|
||||
/* TOFPGA */
|
||||
if (pipe_ctx->plane_res.xfm->funcs->transform_set_pixel_storage_depth == NULL)
|
||||
return;
|
||||
#endif
|
||||
|
||||
if (dc->debug.surface_visual_confirm)
|
||||
if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
|
||||
get_surface_visual_confirm_color(pipe_ctx, &color);
|
||||
else
|
||||
color_space_to_black_color(dc,
|
||||
|
@ -2801,9 +2806,11 @@ void dce110_set_cursor_position(struct pipe_ctx *pipe_ctx)
|
|||
struct dc_cursor_mi_param param = {
|
||||
.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_khz,
|
||||
.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clock_inKhz,
|
||||
.viewport_x_start = pipe_ctx->plane_res.scl_data.viewport.x,
|
||||
.viewport_width = pipe_ctx->plane_res.scl_data.viewport.width,
|
||||
.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz
|
||||
.viewport = pipe_ctx->plane_res.scl_data.viewport,
|
||||
.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
|
||||
.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
|
||||
.rotation = pipe_ctx->plane_state->rotation,
|
||||
.mirror = pipe_ctx->plane_state->horizontal_mirror
|
||||
};
|
||||
|
||||
if (pipe_ctx->plane_state->address.type
|
||||
|
|
|
@ -1011,7 +1011,7 @@ void dce110_free_mem_input_v(
|
|||
{
|
||||
}
|
||||
|
||||
static struct mem_input_funcs dce110_mem_input_v_funcs = {
|
||||
static const struct mem_input_funcs dce110_mem_input_v_funcs = {
|
||||
.mem_input_program_display_marks =
|
||||
dce_mem_input_v_program_display_marks,
|
||||
.mem_input_program_chroma_display_marks =
|
||||
|
|
|
@ -794,43 +794,38 @@ static bool dce110_validate_bandwidth(
|
|||
|
||||
if (memcmp(&dc->current_state->bw.dce,
|
||||
&context->bw.dce, sizeof(context->bw.dce))) {
|
||||
struct log_entry log_entry;
|
||||
dm_logger_open(
|
||||
dc->ctx->logger,
|
||||
&log_entry,
|
||||
LOG_BANDWIDTH_CALCS);
|
||||
dm_logger_append(&log_entry, "%s: finish,\n"
|
||||
|
||||
DC_LOG_BANDWIDTH_CALCS(
|
||||
"%s: finish,\n"
|
||||
"nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
|
||||
"stutMark_b: %d stutMark_a: %d\n",
|
||||
"stutMark_b: %d stutMark_a: %d\n"
|
||||
"nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
|
||||
"stutMark_b: %d stutMark_a: %d\n"
|
||||
"nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
|
||||
"stutMark_b: %d stutMark_a: %d stutter_mode_enable: %d\n"
|
||||
"cstate: %d pstate: %d nbpstate: %d sync: %d dispclk: %d\n"
|
||||
"sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n"
|
||||
,
|
||||
__func__,
|
||||
context->bw.dce.nbp_state_change_wm_ns[0].b_mark,
|
||||
context->bw.dce.nbp_state_change_wm_ns[0].a_mark,
|
||||
context->bw.dce.urgent_wm_ns[0].b_mark,
|
||||
context->bw.dce.urgent_wm_ns[0].a_mark,
|
||||
context->bw.dce.stutter_exit_wm_ns[0].b_mark,
|
||||
context->bw.dce.stutter_exit_wm_ns[0].a_mark);
|
||||
dm_logger_append(&log_entry,
|
||||
"nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
|
||||
"stutMark_b: %d stutMark_a: %d\n",
|
||||
context->bw.dce.stutter_exit_wm_ns[0].a_mark,
|
||||
context->bw.dce.nbp_state_change_wm_ns[1].b_mark,
|
||||
context->bw.dce.nbp_state_change_wm_ns[1].a_mark,
|
||||
context->bw.dce.urgent_wm_ns[1].b_mark,
|
||||
context->bw.dce.urgent_wm_ns[1].a_mark,
|
||||
context->bw.dce.stutter_exit_wm_ns[1].b_mark,
|
||||
context->bw.dce.stutter_exit_wm_ns[1].a_mark);
|
||||
dm_logger_append(&log_entry,
|
||||
"nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
|
||||
"stutMark_b: %d stutMark_a: %d stutter_mode_enable: %d\n",
|
||||
context->bw.dce.stutter_exit_wm_ns[1].a_mark,
|
||||
context->bw.dce.nbp_state_change_wm_ns[2].b_mark,
|
||||
context->bw.dce.nbp_state_change_wm_ns[2].a_mark,
|
||||
context->bw.dce.urgent_wm_ns[2].b_mark,
|
||||
context->bw.dce.urgent_wm_ns[2].a_mark,
|
||||
context->bw.dce.stutter_exit_wm_ns[2].b_mark,
|
||||
context->bw.dce.stutter_exit_wm_ns[2].a_mark,
|
||||
context->bw.dce.stutter_mode_enable);
|
||||
dm_logger_append(&log_entry,
|
||||
"cstate: %d pstate: %d nbpstate: %d sync: %d dispclk: %d\n"
|
||||
"sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n",
|
||||
context->bw.dce.stutter_mode_enable,
|
||||
context->bw.dce.cpuc_state_change_enable,
|
||||
context->bw.dce.cpup_state_change_enable,
|
||||
context->bw.dce.nbp_state_change_enable,
|
||||
|
@ -840,7 +835,6 @@ static bool dce110_validate_bandwidth(
|
|||
context->bw.dce.sclk_deep_sleep_khz,
|
||||
context->bw.dce.yclk_khz,
|
||||
context->bw.dce.blackout_recovery_time_us);
|
||||
dm_logger_close(&log_entry);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
|
|
@ -235,7 +235,7 @@ static void program_overscan(
|
|||
int overscan_right = data->h_active - data->recout.x - data->recout.width;
|
||||
int overscan_bottom = data->v_active - data->recout.y - data->recout.height;
|
||||
|
||||
if (xfm_dce->base.ctx->dc->debug.surface_visual_confirm) {
|
||||
if (xfm_dce->base.ctx->dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) {
|
||||
overscan_bottom += 2;
|
||||
overscan_right += 2;
|
||||
}
|
||||
|
|
|
@ -744,43 +744,38 @@ bool dce112_validate_bandwidth(
|
|||
|
||||
if (memcmp(&dc->current_state->bw.dce,
|
||||
&context->bw.dce, sizeof(context->bw.dce))) {
|
||||
struct log_entry log_entry;
|
||||
dm_logger_open(
|
||||
dc->ctx->logger,
|
||||
&log_entry,
|
||||
LOG_BANDWIDTH_CALCS);
|
||||
dm_logger_append(&log_entry, "%s: finish,\n"
|
||||
|
||||
DC_LOG_BANDWIDTH_CALCS(
|
||||
"%s: finish,\n"
|
||||
"nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
|
||||
"stutMark_b: %d stutMark_a: %d\n",
|
||||
"stutMark_b: %d stutMark_a: %d\n"
|
||||
"nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
|
||||
"stutMark_b: %d stutMark_a: %d\n"
|
||||
"nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
|
||||
"stutMark_b: %d stutMark_a: %d stutter_mode_enable: %d\n"
|
||||
"cstate: %d pstate: %d nbpstate: %d sync: %d dispclk: %d\n"
|
||||
"sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n"
|
||||
,
|
||||
__func__,
|
||||
context->bw.dce.nbp_state_change_wm_ns[0].b_mark,
|
||||
context->bw.dce.nbp_state_change_wm_ns[0].a_mark,
|
||||
context->bw.dce.urgent_wm_ns[0].b_mark,
|
||||
context->bw.dce.urgent_wm_ns[0].a_mark,
|
||||
context->bw.dce.stutter_exit_wm_ns[0].b_mark,
|
||||
context->bw.dce.stutter_exit_wm_ns[0].a_mark);
|
||||
dm_logger_append(&log_entry,
|
||||
"nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
|
||||
"stutMark_b: %d stutMark_a: %d\n",
|
||||
context->bw.dce.stutter_exit_wm_ns[0].a_mark,
|
||||
context->bw.dce.nbp_state_change_wm_ns[1].b_mark,
|
||||
context->bw.dce.nbp_state_change_wm_ns[1].a_mark,
|
||||
context->bw.dce.urgent_wm_ns[1].b_mark,
|
||||
context->bw.dce.urgent_wm_ns[1].a_mark,
|
||||
context->bw.dce.stutter_exit_wm_ns[1].b_mark,
|
||||
context->bw.dce.stutter_exit_wm_ns[1].a_mark);
|
||||
dm_logger_append(&log_entry,
|
||||
"nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
|
||||
"stutMark_b: %d stutMark_a: %d stutter_mode_enable: %d\n",
|
||||
context->bw.dce.stutter_exit_wm_ns[1].a_mark,
|
||||
context->bw.dce.nbp_state_change_wm_ns[2].b_mark,
|
||||
context->bw.dce.nbp_state_change_wm_ns[2].a_mark,
|
||||
context->bw.dce.urgent_wm_ns[2].b_mark,
|
||||
context->bw.dce.urgent_wm_ns[2].a_mark,
|
||||
context->bw.dce.stutter_exit_wm_ns[2].b_mark,
|
||||
context->bw.dce.stutter_exit_wm_ns[2].a_mark,
|
||||
context->bw.dce.stutter_mode_enable);
|
||||
dm_logger_append(&log_entry,
|
||||
"cstate: %d pstate: %d nbpstate: %d sync: %d dispclk: %d\n"
|
||||
"sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n",
|
||||
context->bw.dce.stutter_mode_enable,
|
||||
context->bw.dce.cpuc_state_change_enable,
|
||||
context->bw.dce.cpup_state_change_enable,
|
||||
context->bw.dce.nbp_state_change_enable,
|
||||
|
@ -790,7 +785,6 @@ bool dce112_validate_bandwidth(
|
|||
context->bw.dce.sclk_deep_sleep_khz,
|
||||
context->bw.dce.yclk_khz,
|
||||
context->bw.dce.blackout_recovery_time_us);
|
||||
dm_logger_close(&log_entry);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
|
|
@ -445,10 +445,10 @@ void dpp1_set_cursor_position(
|
|||
uint32_t width)
|
||||
{
|
||||
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
|
||||
int src_x_offset = pos->x - pos->x_hotspot - param->viewport_x_start;
|
||||
int src_x_offset = pos->x - pos->x_hotspot - param->viewport.x;
|
||||
uint32_t cur_en = pos->enable ? 1 : 0;
|
||||
|
||||
if (src_x_offset >= (int)param->viewport_width)
|
||||
if (src_x_offset >= (int)param->viewport.width)
|
||||
cur_en = 0; /* not visible beyond right edge*/
|
||||
|
||||
if (src_x_offset + (int)width <= 0)
|
||||
|
@ -459,6 +459,18 @@ void dpp1_set_cursor_position(
|
|||
|
||||
}
|
||||
|
||||
void dpp1_cnv_set_optional_cursor_attributes(
|
||||
struct dpp *dpp_base,
|
||||
struct dpp_cursor_attributes *attr)
|
||||
{
|
||||
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
|
||||
|
||||
if (attr) {
|
||||
REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_BIAS, attr->bias);
|
||||
REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_SCALE, attr->scale);
|
||||
}
|
||||
}
|
||||
|
||||
void dpp1_dppclk_control(
|
||||
struct dpp *dpp_base,
|
||||
bool dppclk_div,
|
||||
|
@ -499,6 +511,7 @@ static const struct dpp_funcs dcn10_dpp_funcs = {
|
|||
.dpp_full_bypass = dpp1_full_bypass,
|
||||
.set_cursor_attributes = dpp1_set_cursor_attributes,
|
||||
.set_cursor_position = dpp1_set_cursor_position,
|
||||
.set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes,
|
||||
.dpp_dppclk_control = dpp1_dppclk_control,
|
||||
.dpp_set_hdr_multiplier = dpp1_set_hdr_multiplier,
|
||||
};
|
||||
|
|
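A sketch of reaching the new optional-cursor hook through the dpp function table (illustrative only; the bias/scale values are placeholders for the FP16 cursor programming, 0x3C00 being 1.0 in half-float):

	struct dpp_cursor_attributes opt = { .bias = 0, .scale = 0x3C00 };

	if (dpp->funcs->set_optional_cursor_attributes)
		dpp->funcs->set_optional_cursor_attributes(dpp, &opt);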
Some files were not shown because too many files have changed in this diff.