drm fixes for v5.4-rc5
komeda:
- typo fixes
- flushing pipes fix

amdgpu:
- Fix suspend/resume issue related to multi-media engines
- Fix memory leak in user ptr code related to hmm conversion
- Fix possible VM faults when allocating page table memory
- Fix error handling in bo list ioctl
-----BEGIN PGP SIGNATURE-----

iQIcBAABAgAGBQJdspNYAAoJEAx081l5xIa+/moP/RF6DVDmdbIsjCeQfVgIGRAd
djKzdjR3pALJFrwb1k78kMydzaE7zf5s3sIW2aIJCjl4+TnC9YsHzViYQtf1vb4G
TqaUpi3xDLKXtgUoG3ruHcXjnQypmJclMZ6JgLraEFVr9JSqKgexEHiYbTMjnmHC
MRChtrlY720iU+SbTCwENOTkOPBT1pu9YjXehzWmmWFWT5QGzO3wPOAr+c9ld2S5
JYdpmXInUY5XKHcpZ9Gaaaa9MjF9rUz5oSJx/PXqjUTS1DrJDfzuWGrD1M0DCi0G
S6K5S657yXoIZrYVbMwfaeCfrZfF0oJh8iR8G5fdwwJ5om44bCuHkFNeeRNqLaA7
xPlPRHILq48UYWO37wHK7Z3tKD+4DGUS12ngFFvPFnp0KbTIdGaupErfLNYCoZSk
JHR0GDaAITT+qPO2fYC12qmv9mBmJQ5lU7f6jpuya9EbouyVT9RIcSvIsD+7fpaD
uXYspo/rv6sWi66XCZlzkqBqk3E1ZctjUnFP5V3Mo1JVwUMR0QkIyVfX2NKFpoE7
TWpbji9oXfu2+UKYVKIu1WgoSG/8HlUSzY8OXke4lbXyIiIwbctKxsJeaanTAjeI
vjCuNFXwzFysFykEKjAMC+xzMtosVIVZ51m+zmBFaU+KUBbL5TvFMkEUC5GHqj9T
4GGYcjiUgX2xX3R3mmFs
=k3rD
-----END PGP SIGNATURE-----

Merge tag 'drm-fixes-2019-10-25' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Quiet week this week, which I suspect means some people just didn't
  get around to sending me fixes pulls in time. This has 2 komeda and a
  bunch of amdgpu fixes in it:

  komeda:
   - typo fixes
   - flushing pipes fix

  amdgpu:
   - Fix suspend/resume issue related to multi-media engines
   - Fix memory leak in user ptr code related to hmm conversion
   - Fix possible VM faults when allocating page table memory
   - Fix error handling in bo list ioctl"

* tag 'drm-fixes-2019-10-25' of git://anongit.freedesktop.org/drm/drm:
  drm/komeda: Fix typos in komeda_splitter_validate
  drm/komeda: Don't flush inactive pipes
  drm/amdgpu/vce: fix allocation size in enc ring test
  drm/amdgpu: fix error handling in amdgpu_bo_list_create
  drm/amdgpu: fix potential VM faults
  drm/amdgpu: user pages array memory leak fix
  drm/amdgpu/vcn: fix allocation size in enc ring test
  drm/amdgpu/uvd7: fix allocation size in enc ring test (v2)
  drm/amdgpu/uvd6: fix allocation size in enc ring test (v2)
commit 8caacaad78

10 changed files with 96 additions and 49 deletions
drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -140,7 +140,12 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
 	return 0;

 error_free:
-	while (i--) {
+	for (i = 0; i < last_entry; ++i) {
+		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(array[i].tv.bo);
+
+		amdgpu_bo_unref(&bo);
+	}
+	for (i = first_userptr; i < num_entries; ++i) {
 		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(array[i].tv.bo);

 		amdgpu_bo_unref(&bo);
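The hunk above replaces the single backwards `while (i--)` unwind with two bounded loops, so the error path releases exactly the entries that were populated at the front and at the back of the array and never touches the gap in between. Below is a minimal standalone sketch of that unwind pattern; it uses plain malloc/free stand-ins, and `entry`, `setup_entry` and the other names are made up for illustration, not amdgpu API.

/*
 * Standalone sketch (not kernel code): unwind a partially built array
 * where "normal" entries fill [0, last_entry) from the front and
 * "userptr" entries fill [first_userptr, num_entries) from the back,
 * mirroring the two-loop cleanup in amdgpu_bo_list_create().
 */
#include <stdio.h>
#include <stdlib.h>

struct entry {
	void *payload;		/* stands in for the referenced BO */
};

static int setup_entry(struct entry *e, unsigned idx)
{
	if (idx == 3)		/* simulate a failure partway through */
		return -1;
	e->payload = malloc(16);
	return e->payload ? 0 : -1;
}

int main(void)
{
	enum { num_entries = 6 };
	struct entry array[num_entries] = { 0 };
	unsigned last_entry = 0, first_userptr = num_entries;
	unsigned i;

	for (i = 0; i < num_entries; ++i) {
		/* pretend even indices are userptr-style entries */
		int userptr = (i % 2 == 0);
		unsigned slot = userptr ? first_userptr - 1 : last_entry;

		if (setup_entry(&array[slot], i))
			goto error_free;

		if (userptr)
			first_userptr--;
		else
			last_entry++;
	}
	puts("all entries created");
	return 0;

error_free:
	/* free only what was actually initialised: the front block ... */
	for (i = 0; i < last_entry; ++i)
		free(array[i].payload);
	/* ... and the back block; the gap in the middle was never touched */
	for (i = first_userptr; i < num_entries; ++i)
		free(array[i].payload);
	fputs("creation failed, partial state released\n", stderr);
	return 1;
}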
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -536,7 +536,6 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,

 	list_for_each_entry(lobj, validated, tv.head) {
 		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo);
-		bool binding_userptr = false;
 		struct mm_struct *usermm;

 		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
@@ -553,7 +552,6 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,

 			amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
 						     lobj->user_pages);
-			binding_userptr = true;
 		}

 		if (p->evictable == lobj)
@@ -563,11 +561,9 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
 		if (r)
 			return r;

-		if (binding_userptr) {
-			kvfree(lobj->user_pages);
-			lobj->user_pages = NULL;
-		}
+		kvfree(lobj->user_pages);
+		lobj->user_pages = NULL;
 	}
 	return 0;
 }

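The three hunks above drop the `binding_userptr` flag and free `lobj->user_pages` unconditionally once the BO has been validated; since `kvfree(NULL)` is a no-op, this is safe even when no array was allocated, and it closes the leak for entries that allocated pages but never took the binding path. Here is a small standalone model of the corrected pattern, with invented names rather than the kernel's structures.

/*
 * Standalone sketch (not kernel code) of the pattern fixed in
 * amdgpu_cs_list_validate(): a per-item scratch array may be allocated
 * whether or not it ends up being "bound", so it must be released
 * unconditionally once the item has been processed.  free(NULL) is a
 * no-op, just like kvfree(NULL) in the kernel.
 */
#include <stdlib.h>
#include <string.h>

struct item {
	char *user_pages;	/* optional scratch buffer, may be NULL */
	int   needs_pages;
};

/* stand-in for the earlier stage that may populate item->user_pages */
static void prefetch_pages(struct item *it)
{
	if (it->needs_pages)
		it->user_pages = malloc(4096);
}

/* stand-in for validation; only some items actually consume the buffer */
static int validate(struct item *it)
{
	if (it->user_pages)
		memset(it->user_pages, 0, 4096);	/* "bind" the pages */
	return 0;
}

int main(void)
{
	struct item items[3] = {
		{ .needs_pages = 1 }, { .needs_pages = 0 }, { .needs_pages = 1 },
	};
	size_t i;

	for (i = 0; i < 3; ++i)
		prefetch_pages(&items[i]);

	for (i = 0; i < 3; ++i) {
		if (validate(&items[i]))
			return 1;
		/*
		 * Release the scratch array on every iteration rather than
		 * only when a "bound" flag was set; otherwise items that
		 * allocated pages but skipped the binding path would leak.
		 */
		free(items[i].user_pages);
		items[i].user_pages = NULL;
	}
	return 0;
}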
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -453,7 +453,8 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
 		.interruptible = (bp->type != ttm_bo_type_kernel),
 		.no_wait_gpu = false,
 		.resv = bp->resv,
-		.flags = TTM_OPT_FLAG_ALLOW_RES_EVICT
+		.flags = bp->type != ttm_bo_type_kernel ?
+			TTM_OPT_FLAG_ALLOW_RES_EVICT : 0
 	};
 	struct amdgpu_bo *bo;
 	unsigned long page_align, size = bp->size;
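The change above gates `TTM_OPT_FLAG_ALLOW_RES_EVICT` on the BO type, so kernel allocations such as page table memory are never satisfied by evicting other reserved buffers. A tiny standalone sketch of the same conditional-initializer idiom follows; the types and flag below are invented stand-ins, not TTM's.

/*
 * Standalone sketch (not kernel code): pick the "allow evicting other
 * reserved buffers" flag only for non-kernel allocations, mirroring the
 * ternary added to amdgpu_bo_do_create().
 */
#include <stdio.h>

enum bo_type { BO_TYPE_KERNEL, BO_TYPE_DEVICE };

#define OPT_ALLOW_RES_EVICT 0x1u

struct op_ctx {
	int interruptible;
	unsigned flags;
};

static struct op_ctx make_ctx(enum bo_type type)
{
	struct op_ctx ctx = {
		.interruptible = (type != BO_TYPE_KERNEL),
		/* kernel BOs must not be served by evicting reserved BOs */
		.flags = type != BO_TYPE_KERNEL ? OPT_ALLOW_RES_EVICT : 0,
	};
	return ctx;
}

int main(void)
{
	printf("kernel: flags=%u\n", make_ctx(BO_TYPE_KERNEL).flags);	/* 0 */
	printf("device: flags=%u\n", make_ctx(BO_TYPE_DEVICE).flags);	/* 1 */
	return 0;
}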
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -429,13 +429,14 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
  * Open up a stream for HW test
  */
 int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+			      struct amdgpu_bo *bo,
 			      struct dma_fence **fence)
 {
 	const unsigned ib_size_dw = 1024;
 	struct amdgpu_job *job;
 	struct amdgpu_ib *ib;
 	struct dma_fence *f = NULL;
-	uint64_t dummy;
+	uint64_t addr;
 	int i, r;

 	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -444,7 +445,7 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,

 	ib = &job->ibs[0];

-	dummy = ib->gpu_addr + 1024;
+	addr = amdgpu_bo_gpu_offset(bo);

 	/* stitch together an VCE create msg */
 	ib->length_dw = 0;
@@ -476,8 +477,8 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,

 	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
 	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
-	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
-	ib->ptr[ib->length_dw++] = dummy;
+	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+	ib->ptr[ib->length_dw++] = addr;
 	ib->ptr[ib->length_dw++] = 0x00000001;

 	for (i = ib->length_dw; i < ib_size_dw; ++i)
@@ -1110,13 +1111,20 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
 int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
 	struct dma_fence *fence = NULL;
+	struct amdgpu_bo *bo = NULL;
 	long r;

 	/* skip vce ring1/2 ib test for now, since it's not reliable */
 	if (ring != &ring->adev->vce.ring[0])
 		return 0;

-	r = amdgpu_vce_get_create_msg(ring, 1, NULL);
+	r = amdgpu_bo_create_reserved(ring->adev, 512, PAGE_SIZE,
+				      AMDGPU_GEM_DOMAIN_VRAM,
+				      &bo, NULL, NULL);
+	if (r)
+		return r;
+
+	r = amdgpu_vce_get_create_msg(ring, 1, bo, NULL);
 	if (r)
 		goto error;

@@ -1132,5 +1140,7 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)

 error:
 	dma_fence_put(fence);
+	amdgpu_bo_unreserve(bo);
+	amdgpu_bo_unref(&bo);
 	return r;
 }
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -59,6 +59,7 @@ int amdgpu_vce_entity_init(struct amdgpu_device *adev);
 int amdgpu_vce_suspend(struct amdgpu_device *adev);
 int amdgpu_vce_resume(struct amdgpu_device *adev);
 int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+			      struct amdgpu_bo *bo,
 			      struct dma_fence **fence);
 int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 			       bool direct, struct dma_fence **fence);
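The VCE allocation-size fix above, and the VCN and UVD 6/7 hunks that follow, all share one shape: the encoder ring test used to point its feedback/fence data at ib->gpu_addr + 1024, memory the small IB allocation was never sized to provide, so the test could scribble over other allocations. Now a dedicated BO is created and reserved up front, its GPU offset is written into the create/destroy messages, and the BO is unreserved and unreferenced on every exit path. Below is a minimal userspace-style sketch of that shape; all helper names are illustrative stand-ins, not driver API.

/*
 * Standalone sketch (not kernel code) of the enc ring-test fix: allocate
 * a properly sized buffer first, embed its address in the test messages,
 * and release it on success and failure alike.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct test_buf {
	uint64_t gpu_addr;	/* pretend GPU address */
	void *cpu_ptr;
};

static int buf_create_reserved(size_t size, struct test_buf **out)
{
	struct test_buf *b = calloc(1, sizeof(*b));

	if (!b)
		return -1;
	b->cpu_ptr = calloc(1, size);
	if (!b->cpu_ptr) {
		free(b);
		return -1;
	}
	b->gpu_addr = (uintptr_t)b->cpu_ptr;	/* fake GPU mapping */
	*out = b;
	return 0;
}

static void buf_free(struct test_buf **b)
{
	if (*b) {
		free((*b)->cpu_ptr);
		free(*b);
		*b = NULL;
	}
}

/* stand-in for get_create_msg()/get_destroy_msg(): builds a message that
 * embeds the dedicated buffer's address instead of "IB address + 1024" */
static int emit_test_msg(uint32_t handle, const struct test_buf *bo)
{
	uint32_t msg[4];

	msg[0] = 0x00000018;
	msg[1] = handle;
	msg[2] = (uint32_t)(bo->gpu_addr >> 32);
	msg[3] = (uint32_t)bo->gpu_addr;
	return msg[3] == 0 ? -1 : 0;	/* pretend validation */
}

static int ring_test(void)
{
	struct test_buf *bo = NULL;
	int r;

	/* allocate the feedback buffer up front, sized for the message */
	r = buf_create_reserved(128 * 1024, &bo);
	if (r)
		return r;

	r = emit_test_msg(1, bo);	/* create */
	if (r)
		goto error;
	r = emit_test_msg(1, bo);	/* destroy */
error:
	buf_free(&bo);			/* released on every exit path */
	return r;
}

int main(void)
{
	printf("ring_test: %d\n", ring_test());
	return 0;
}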
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -569,13 +569,14 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
 }

 static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+					 struct amdgpu_bo *bo,
 					 struct dma_fence **fence)
 {
 	const unsigned ib_size_dw = 16;
 	struct amdgpu_job *job;
 	struct amdgpu_ib *ib;
 	struct dma_fence *f = NULL;
-	uint64_t dummy;
+	uint64_t addr;
 	int i, r;

 	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -583,14 +584,14 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
 		return r;

 	ib = &job->ibs[0];
-	dummy = ib->gpu_addr + 1024;
+	addr = amdgpu_bo_gpu_offset(bo);

 	ib->length_dw = 0;
 	ib->ptr[ib->length_dw++] = 0x00000018;
 	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
 	ib->ptr[ib->length_dw++] = handle;
-	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
-	ib->ptr[ib->length_dw++] = dummy;
+	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+	ib->ptr[ib->length_dw++] = addr;
 	ib->ptr[ib->length_dw++] = 0x0000000b;

 	ib->ptr[ib->length_dw++] = 0x00000014;
@@ -621,13 +622,14 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
 }

 static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
+					  struct amdgpu_bo *bo,
 					  struct dma_fence **fence)
 {
 	const unsigned ib_size_dw = 16;
 	struct amdgpu_job *job;
 	struct amdgpu_ib *ib;
 	struct dma_fence *f = NULL;
-	uint64_t dummy;
+	uint64_t addr;
 	int i, r;

 	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -635,14 +637,14 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
 		return r;

 	ib = &job->ibs[0];
-	dummy = ib->gpu_addr + 1024;
+	addr = amdgpu_bo_gpu_offset(bo);

 	ib->length_dw = 0;
 	ib->ptr[ib->length_dw++] = 0x00000018;
 	ib->ptr[ib->length_dw++] = 0x00000001;
 	ib->ptr[ib->length_dw++] = handle;
-	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
-	ib->ptr[ib->length_dw++] = dummy;
+	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+	ib->ptr[ib->length_dw++] = addr;
 	ib->ptr[ib->length_dw++] = 0x0000000b;

 	ib->ptr[ib->length_dw++] = 0x00000014;
@@ -675,13 +677,20 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
 int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
 	struct dma_fence *fence = NULL;
+	struct amdgpu_bo *bo = NULL;
 	long r;

-	r = amdgpu_vcn_enc_get_create_msg(ring, 1, NULL);
+	r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
+				      AMDGPU_GEM_DOMAIN_VRAM,
+				      &bo, NULL, NULL);
+	if (r)
+		return r;
+
+	r = amdgpu_vcn_enc_get_create_msg(ring, 1, bo, NULL);
 	if (r)
 		goto error;

-	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &fence);
+	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, bo, &fence);
 	if (r)
 		goto error;

@@ -693,6 +702,8 @@ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)

 error:
 	dma_fence_put(fence);
+	amdgpu_bo_unreserve(bo);
+	amdgpu_bo_unref(&bo);
 	return r;
 }

drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -206,13 +206,14 @@ static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
  * Open up a stream for HW test
  */
 static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+				       struct amdgpu_bo *bo,
 				       struct dma_fence **fence)
 {
 	const unsigned ib_size_dw = 16;
 	struct amdgpu_job *job;
 	struct amdgpu_ib *ib;
 	struct dma_fence *f = NULL;
-	uint64_t dummy;
+	uint64_t addr;
 	int i, r;

 	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -220,15 +221,15 @@ static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
 		return r;

 	ib = &job->ibs[0];
-	dummy = ib->gpu_addr + 1024;
+	addr = amdgpu_bo_gpu_offset(bo);

 	ib->length_dw = 0;
 	ib->ptr[ib->length_dw++] = 0x00000018;
 	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
 	ib->ptr[ib->length_dw++] = handle;
 	ib->ptr[ib->length_dw++] = 0x00010000;
-	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
-	ib->ptr[ib->length_dw++] = dummy;
+	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+	ib->ptr[ib->length_dw++] = addr;

 	ib->ptr[ib->length_dw++] = 0x00000014;
 	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
@@ -268,13 +269,14 @@ static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
  */
 static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
 					uint32_t handle,
+					struct amdgpu_bo *bo,
 					struct dma_fence **fence)
 {
 	const unsigned ib_size_dw = 16;
 	struct amdgpu_job *job;
 	struct amdgpu_ib *ib;
 	struct dma_fence *f = NULL;
-	uint64_t dummy;
+	uint64_t addr;
 	int i, r;

 	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -282,15 +284,15 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
 		return r;

 	ib = &job->ibs[0];
-	dummy = ib->gpu_addr + 1024;
+	addr = amdgpu_bo_gpu_offset(bo);

 	ib->length_dw = 0;
 	ib->ptr[ib->length_dw++] = 0x00000018;
 	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
 	ib->ptr[ib->length_dw++] = handle;
 	ib->ptr[ib->length_dw++] = 0x00010000;
-	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
-	ib->ptr[ib->length_dw++] = dummy;
+	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+	ib->ptr[ib->length_dw++] = addr;

 	ib->ptr[ib->length_dw++] = 0x00000014;
 	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
@@ -327,13 +329,20 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
 static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
 	struct dma_fence *fence = NULL;
+	struct amdgpu_bo *bo = NULL;
 	long r;

-	r = uvd_v6_0_enc_get_create_msg(ring, 1, NULL);
+	r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
+				      AMDGPU_GEM_DOMAIN_VRAM,
+				      &bo, NULL, NULL);
+	if (r)
+		return r;
+
+	r = uvd_v6_0_enc_get_create_msg(ring, 1, bo, NULL);
 	if (r)
 		goto error;

-	r = uvd_v6_0_enc_get_destroy_msg(ring, 1, &fence);
+	r = uvd_v6_0_enc_get_destroy_msg(ring, 1, bo, &fence);
 	if (r)
 		goto error;

@@ -345,6 +354,8 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)

 error:
 	dma_fence_put(fence);
+	amdgpu_bo_unreserve(bo);
+	amdgpu_bo_unref(&bo);
 	return r;
 }

drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -214,13 +214,14 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
  * Open up a stream for HW test
  */
 static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+				       struct amdgpu_bo *bo,
 				       struct dma_fence **fence)
 {
 	const unsigned ib_size_dw = 16;
 	struct amdgpu_job *job;
 	struct amdgpu_ib *ib;
 	struct dma_fence *f = NULL;
-	uint64_t dummy;
+	uint64_t addr;
 	int i, r;

 	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -228,15 +229,15 @@ static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
 		return r;

 	ib = &job->ibs[0];
-	dummy = ib->gpu_addr + 1024;
+	addr = amdgpu_bo_gpu_offset(bo);

 	ib->length_dw = 0;
 	ib->ptr[ib->length_dw++] = 0x00000018;
 	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
 	ib->ptr[ib->length_dw++] = handle;
 	ib->ptr[ib->length_dw++] = 0x00000000;
-	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
-	ib->ptr[ib->length_dw++] = dummy;
+	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+	ib->ptr[ib->length_dw++] = addr;

 	ib->ptr[ib->length_dw++] = 0x00000014;
 	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
@@ -275,13 +276,14 @@ static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
  * Close up a stream for HW test or if userspace failed to do so
  */
 static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
+					struct amdgpu_bo *bo,
 					struct dma_fence **fence)
 {
 	const unsigned ib_size_dw = 16;
 	struct amdgpu_job *job;
 	struct amdgpu_ib *ib;
 	struct dma_fence *f = NULL;
-	uint64_t dummy;
+	uint64_t addr;
 	int i, r;

 	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -289,15 +291,15 @@ static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handl
 		return r;

 	ib = &job->ibs[0];
-	dummy = ib->gpu_addr + 1024;
+	addr = amdgpu_bo_gpu_offset(bo);

 	ib->length_dw = 0;
 	ib->ptr[ib->length_dw++] = 0x00000018;
 	ib->ptr[ib->length_dw++] = 0x00000001;
 	ib->ptr[ib->length_dw++] = handle;
 	ib->ptr[ib->length_dw++] = 0x00000000;
-	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
-	ib->ptr[ib->length_dw++] = dummy;
+	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+	ib->ptr[ib->length_dw++] = addr;

 	ib->ptr[ib->length_dw++] = 0x00000014;
 	ib->ptr[ib->length_dw++] = 0x00000002;
@@ -334,13 +336,20 @@ static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handl
 static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
 	struct dma_fence *fence = NULL;
+	struct amdgpu_bo *bo = NULL;
 	long r;

-	r = uvd_v7_0_enc_get_create_msg(ring, 1, NULL);
+	r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
+				      AMDGPU_GEM_DOMAIN_VRAM,
+				      &bo, NULL, NULL);
+	if (r)
+		return r;
+
+	r = uvd_v7_0_enc_get_create_msg(ring, 1, bo, NULL);
 	if (r)
 		goto error;

-	r = uvd_v7_0_enc_get_destroy_msg(ring, 1, &fence);
+	r = uvd_v7_0_enc_get_destroy_msg(ring, 1, bo, &fence);
 	if (r)
 		goto error;

@@ -352,6 +361,8 @@ static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)

 error:
 	dma_fence_put(fence);
+	amdgpu_bo_unreserve(bo);
+	amdgpu_bo_unref(&bo);
 	return r;
 }

drivers/gpu/drm/arm/display/komeda/komeda_kms.c
@@ -82,7 +82,8 @@ static void komeda_kms_commit_tail(struct drm_atomic_state *old_state)

 	drm_atomic_helper_commit_modeset_disables(dev, old_state);

-	drm_atomic_helper_commit_planes(dev, old_state, 0);
+	drm_atomic_helper_commit_planes(dev, old_state,
+					DRM_PLANE_COMMIT_ACTIVE_ONLY);

 	drm_atomic_helper_commit_modeset_enables(dev, old_state);

drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
@@ -564,8 +564,8 @@ komeda_splitter_validate(struct komeda_splitter *splitter,
 	}

 	if (!in_range(&splitter->vsize, dflow->in_h)) {
-		DRM_DEBUG_ATOMIC("split in_in: %d exceed the acceptable range.\n",
-				 dflow->in_w);
+		DRM_DEBUG_ATOMIC("split in_h: %d exceeds the acceptable range.\n",
+				 dflow->in_h);
 		return -EINVAL;
 	}
