drm/amdgpu: rename all rbo variables to abo v2

Just to clean up some radeon leftovers.

sed -i "s/rbo/abo/g" drivers/gpu/drm/amd/amdgpu/*.c
sed -i "s/rbo/abo/g" drivers/gpu/drm/amd/amdgpu/*.h

v2: rebased

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit 765e7fbf08 (parent 1927ffc0c1)
Author: Christian König, 2016-09-15 15:06:50 +02:00
Committed by: Alex Deucher
12 changed files with 159 additions and 159 deletions

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h

@@ -703,7 +703,7 @@ struct amdgpu_flip_work {
 	u32 target_vblank;
 	uint64_t base;
 	struct drm_pending_vblank_event *event;
-	struct amdgpu_bo *old_rbo;
+	struct amdgpu_bo *old_abo;
 	struct fence *excl;
 	unsigned shared_count;
 	struct fence **shared;
@@ -2416,7 +2416,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
 int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
 		       u32 ip_instance, u32 ring,
 		       struct amdgpu_ring **out_ring);
-void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain);
+void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
 bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
 int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
 int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c

@@ -123,17 +123,17 @@ static void amdgpu_unpin_work_func(struct work_struct *__work)
 	int r;
 
 	/* unpin of the old buffer */
-	r = amdgpu_bo_reserve(work->old_rbo, false);
+	r = amdgpu_bo_reserve(work->old_abo, false);
 	if (likely(r == 0)) {
-		r = amdgpu_bo_unpin(work->old_rbo);
+		r = amdgpu_bo_unpin(work->old_abo);
 		if (unlikely(r != 0)) {
 			DRM_ERROR("failed to unpin buffer after flip\n");
 		}
-		amdgpu_bo_unreserve(work->old_rbo);
+		amdgpu_bo_unreserve(work->old_abo);
 	} else
 		DRM_ERROR("failed to reserve buffer after flip\n");
 
-	amdgpu_bo_unref(&work->old_rbo);
+	amdgpu_bo_unref(&work->old_abo);
 	kfree(work->shared);
 	kfree(work);
 }
@@ -150,7 +150,7 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
 	struct amdgpu_framebuffer *new_amdgpu_fb;
 	struct drm_gem_object *obj;
 	struct amdgpu_flip_work *work;
-	struct amdgpu_bo *new_rbo;
+	struct amdgpu_bo *new_abo;
 	unsigned long flags;
 	u64 tiling_flags;
 	u64 base;
@@ -173,28 +173,28 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
 	obj = old_amdgpu_fb->obj;
 
 	/* take a reference to the old object */
-	work->old_rbo = gem_to_amdgpu_bo(obj);
-	amdgpu_bo_ref(work->old_rbo);
+	work->old_abo = gem_to_amdgpu_bo(obj);
+	amdgpu_bo_ref(work->old_abo);
 
 	new_amdgpu_fb = to_amdgpu_framebuffer(fb);
 	obj = new_amdgpu_fb->obj;
-	new_rbo = gem_to_amdgpu_bo(obj);
+	new_abo = gem_to_amdgpu_bo(obj);
 
 	/* pin the new buffer */
-	r = amdgpu_bo_reserve(new_rbo, false);
+	r = amdgpu_bo_reserve(new_abo, false);
 	if (unlikely(r != 0)) {
-		DRM_ERROR("failed to reserve new rbo buffer before flip\n");
+		DRM_ERROR("failed to reserve new abo buffer before flip\n");
 		goto cleanup;
 	}
 
-	r = amdgpu_bo_pin_restricted(new_rbo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, &base);
+	r = amdgpu_bo_pin_restricted(new_abo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, &base);
 	if (unlikely(r != 0)) {
 		r = -EINVAL;
-		DRM_ERROR("failed to pin new rbo buffer before flip\n");
+		DRM_ERROR("failed to pin new abo buffer before flip\n");
 		goto unreserve;
 	}
 
-	r = reservation_object_get_fences_rcu(new_rbo->tbo.resv, &work->excl,
+	r = reservation_object_get_fences_rcu(new_abo->tbo.resv, &work->excl,
 					      &work->shared_count,
 					      &work->shared);
 	if (unlikely(r != 0)) {
@@ -202,8 +202,8 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
 		goto unpin;
 	}
 
-	amdgpu_bo_get_tiling_flags(new_rbo, &tiling_flags);
-	amdgpu_bo_unreserve(new_rbo);
+	amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags);
+	amdgpu_bo_unreserve(new_abo);
 
 	work->base = base;
 	work->target_vblank = target - drm_crtc_vblank_count(crtc) +
@@ -231,19 +231,19 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
 	return 0;
 
 pflip_cleanup:
-	if (unlikely(amdgpu_bo_reserve(new_rbo, false) != 0)) {
-		DRM_ERROR("failed to reserve new rbo in error path\n");
+	if (unlikely(amdgpu_bo_reserve(new_abo, false) != 0)) {
+		DRM_ERROR("failed to reserve new abo in error path\n");
 		goto cleanup;
 	}
 unpin:
-	if (unlikely(amdgpu_bo_unpin(new_rbo) != 0)) {
-		DRM_ERROR("failed to unpin new rbo in error path\n");
+	if (unlikely(amdgpu_bo_unpin(new_abo) != 0)) {
+		DRM_ERROR("failed to unpin new abo in error path\n");
 	}
 unreserve:
-	amdgpu_bo_unreserve(new_rbo);
+	amdgpu_bo_unreserve(new_abo);
 
 cleanup:
-	amdgpu_bo_unref(&work->old_rbo);
+	amdgpu_bo_unref(&work->old_abo);
 	fence_put(work->excl);
 	for (i = 0; i < work->shared_count; ++i)
 		fence_put(work->shared[i]);

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c

@@ -116,14 +116,14 @@ int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int bpp, bool tile
 
 static void amdgpufb_destroy_pinned_object(struct drm_gem_object *gobj)
 {
-	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(gobj);
+	struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
 	int ret;
 
-	ret = amdgpu_bo_reserve(rbo, false);
+	ret = amdgpu_bo_reserve(abo, false);
 	if (likely(ret == 0)) {
-		amdgpu_bo_kunmap(rbo);
-		amdgpu_bo_unpin(rbo);
-		amdgpu_bo_unreserve(rbo);
+		amdgpu_bo_kunmap(abo);
+		amdgpu_bo_unpin(abo);
+		amdgpu_bo_unreserve(abo);
 	}
 	drm_gem_object_unreference_unlocked(gobj);
 }
@@ -134,7 +134,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
 {
 	struct amdgpu_device *adev = rfbdev->adev;
 	struct drm_gem_object *gobj = NULL;
-	struct amdgpu_bo *rbo = NULL;
+	struct amdgpu_bo *abo = NULL;
 	bool fb_tiled = false; /* useful for testing */
 	u32 tiling_flags = 0;
 	int ret;
@@ -160,30 +160,30 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
 		       aligned_size);
 		return -ENOMEM;
 	}
-	rbo = gem_to_amdgpu_bo(gobj);
+	abo = gem_to_amdgpu_bo(gobj);
 
 	if (fb_tiled)
 		tiling_flags = AMDGPU_TILING_SET(ARRAY_MODE, GRPH_ARRAY_2D_TILED_THIN1);
 
-	ret = amdgpu_bo_reserve(rbo, false);
+	ret = amdgpu_bo_reserve(abo, false);
 	if (unlikely(ret != 0))
 		goto out_unref;
 
 	if (tiling_flags) {
-		ret = amdgpu_bo_set_tiling_flags(rbo,
+		ret = amdgpu_bo_set_tiling_flags(abo,
 						 tiling_flags);
 		if (ret)
 			dev_err(adev->dev, "FB failed to set tiling flags\n");
 	}
 
-	ret = amdgpu_bo_pin_restricted(rbo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, NULL);
+	ret = amdgpu_bo_pin_restricted(abo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, NULL);
 	if (ret) {
-		amdgpu_bo_unreserve(rbo);
+		amdgpu_bo_unreserve(abo);
 		goto out_unref;
 	}
-	ret = amdgpu_bo_kmap(rbo, NULL);
-	amdgpu_bo_unreserve(rbo);
+	ret = amdgpu_bo_kmap(abo, NULL);
+	amdgpu_bo_unreserve(abo);
 	if (ret) {
 		goto out_unref;
 	}
@@ -205,7 +205,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
 	struct drm_framebuffer *fb = NULL;
 	struct drm_mode_fb_cmd2 mode_cmd;
 	struct drm_gem_object *gobj = NULL;
-	struct amdgpu_bo *rbo = NULL;
+	struct amdgpu_bo *abo = NULL;
 	int ret;
 	unsigned long tmp;
@@ -224,7 +224,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
 		return ret;
 	}
 
-	rbo = gem_to_amdgpu_bo(gobj);
+	abo = gem_to_amdgpu_bo(gobj);
 
 	/* okay we have an object now allocate the framebuffer */
 	info = drm_fb_helper_alloc_fbi(helper);
@@ -247,7 +247,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
 
 	/* setup helper */
 	rfbdev->helper.fb = fb;
 
-	memset_io(rbo->kptr, 0x0, amdgpu_bo_size(rbo));
+	memset_io(abo->kptr, 0x0, amdgpu_bo_size(abo));
 
 	strcpy(info->fix.id, "amdgpudrmfb");
@@ -256,11 +256,11 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
 	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
 	info->fbops = &amdgpufb_ops;
 
-	tmp = amdgpu_bo_gpu_offset(rbo) - adev->mc.vram_start;
+	tmp = amdgpu_bo_gpu_offset(abo) - adev->mc.vram_start;
 	info->fix.smem_start = adev->mc.aper_base + tmp;
-	info->fix.smem_len = amdgpu_bo_size(rbo);
-	info->screen_base = rbo->kptr;
-	info->screen_size = amdgpu_bo_size(rbo);
+	info->fix.smem_len = amdgpu_bo_size(abo);
+	info->screen_base = abo->kptr;
+	info->screen_size = amdgpu_bo_size(abo);
 
 	drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
@@ -277,7 +277,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
 
 	DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
 	DRM_INFO("vram apper at 0x%lX\n", (unsigned long)adev->mc.aper_base);
-	DRM_INFO("size %lu\n", (unsigned long)amdgpu_bo_size(rbo));
+	DRM_INFO("size %lu\n", (unsigned long)amdgpu_bo_size(abo));
 	DRM_INFO("fb depth is %d\n", fb->depth);
 	DRM_INFO("   pitch is %d\n", fb->pitches[0]);
@@ -287,7 +287,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
 out_destroy_fbi:
 	drm_fb_helper_release_fbi(helper);
 out_unref:
-	if (rbo) {
+	if (abo) {
 
 	}
 	if (fb && ret) {

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c

@@ -118,23 +118,23 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev)
  */
 int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
 {
-	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(obj);
-	struct amdgpu_device *adev = rbo->adev;
+	struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
+	struct amdgpu_device *adev = abo->adev;
 	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
 	struct amdgpu_vm *vm = &fpriv->vm;
 	struct amdgpu_bo_va *bo_va;
 	int r;
 
-	r = amdgpu_bo_reserve(rbo, false);
+	r = amdgpu_bo_reserve(abo, false);
 	if (r)
 		return r;
 
-	bo_va = amdgpu_vm_bo_find(vm, rbo);
+	bo_va = amdgpu_vm_bo_find(vm, abo);
 	if (!bo_va) {
-		bo_va = amdgpu_vm_bo_add(adev, vm, rbo);
+		bo_va = amdgpu_vm_bo_add(adev, vm, abo);
 	} else {
 		++bo_va->ref_count;
 	}
-	amdgpu_bo_unreserve(rbo);
+	amdgpu_bo_unreserve(abo);
 	return 0;
 }
@@ -547,7 +547,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	struct drm_gem_object *gobj;
 	struct amdgpu_device *adev = dev->dev_private;
 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
-	struct amdgpu_bo *rbo;
+	struct amdgpu_bo *abo;
 	struct amdgpu_bo_va *bo_va;
 	struct ttm_validate_buffer tv, tv_pd;
 	struct ww_acquire_ctx ticket;
@@ -587,10 +587,10 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	gobj = drm_gem_object_lookup(filp, args->handle);
 	if (gobj == NULL)
 		return -ENOENT;
-	rbo = gem_to_amdgpu_bo(gobj);
+	abo = gem_to_amdgpu_bo(gobj);
 	INIT_LIST_HEAD(&list);
 	INIT_LIST_HEAD(&duplicates);
-	tv.bo = &rbo->tbo;
+	tv.bo = &abo->tbo;
 	tv.shared = true;
 	list_add(&tv.head, &list);
@@ -604,7 +604,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 		return r;
 	}
 
-	bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo);
+	bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
 	if (!bo_va) {
 		ttm_eu_backoff_reservation(&ticket, &list);
 		drm_gem_object_unreference_unlocked(gobj);

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c

@@ -203,10 +203,10 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
 	placement->busy_placement = places;
 }
 
-void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain)
+void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
 {
-	amdgpu_ttm_placement_init(rbo->adev, &rbo->placement,
-				  rbo->placements, domain, rbo->flags);
+	amdgpu_ttm_placement_init(abo->adev, &abo->placement,
+				  abo->placements, domain, abo->flags);
 }
 
 static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
@@ -849,23 +849,23 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
 void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 			   struct ttm_mem_reg *new_mem)
 {
-	struct amdgpu_bo *rbo;
+	struct amdgpu_bo *abo;
 	struct ttm_mem_reg *old_mem = &bo->mem;
 
 	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
 		return;
 
-	rbo = container_of(bo, struct amdgpu_bo, tbo);
-	amdgpu_vm_bo_invalidate(rbo->adev, rbo);
+	abo = container_of(bo, struct amdgpu_bo, tbo);
+	amdgpu_vm_bo_invalidate(abo->adev, abo);
 
 	/* update statistics */
 	if (!new_mem)
 		return;
 
 	/* move_notify is called before move happens */
-	amdgpu_update_memory_usage(rbo->adev, &bo->mem, new_mem);
+	amdgpu_update_memory_usage(abo->adev, &bo->mem, new_mem);
 
-	trace_amdgpu_ttm_bo_move(rbo, new_mem->mem_type, old_mem->mem_type);
+	trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
 }
 
 int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c

@@ -195,7 +195,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 				struct ttm_placement *placement)
 {
-	struct amdgpu_bo *rbo;
+	struct amdgpu_bo *abo;
 	static struct ttm_place placements = {
 		.fpfn = 0,
 		.lpfn = 0,
@@ -210,43 +210,43 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 		placement->num_busy_placement = 1;
 		return;
 	}
-	rbo = container_of(bo, struct amdgpu_bo, tbo);
+	abo = container_of(bo, struct amdgpu_bo, tbo);
 	switch (bo->mem.mem_type) {
 	case TTM_PL_VRAM:
-		if (rbo->adev->mman.buffer_funcs_ring->ready == false) {
-			amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_CPU);
+		if (abo->adev->mman.buffer_funcs_ring->ready == false) {
+			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
 		} else {
-			amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_GTT);
-			for (i = 0; i < rbo->placement.num_placement; ++i) {
-				if (!(rbo->placements[i].flags &
+			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
+			for (i = 0; i < abo->placement.num_placement; ++i) {
+				if (!(abo->placements[i].flags &
 				      TTM_PL_FLAG_TT))
 					continue;
 
-				if (rbo->placements[i].lpfn)
+				if (abo->placements[i].lpfn)
 					continue;
 
 				/* set an upper limit to force directly
 				 * allocating address space for the BO.
 				 */
-				rbo->placements[i].lpfn =
-					rbo->adev->mc.gtt_size >> PAGE_SHIFT;
+				abo->placements[i].lpfn =
+					abo->adev->mc.gtt_size >> PAGE_SHIFT;
 			}
 		}
 		break;
 	case TTM_PL_TT:
 	default:
-		amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_CPU);
+		amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
 	}
-	*placement = rbo->placement;
+	*placement = abo->placement;
 }
 
 static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 {
-	struct amdgpu_bo *rbo = container_of(bo, struct amdgpu_bo, tbo);
+	struct amdgpu_bo *abo = container_of(bo, struct amdgpu_bo, tbo);
 
 	if (amdgpu_ttm_tt_get_usermm(bo->ttm))
 		return -EPERM;
-	return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
+	return drm_vma_node_verify_access(&abo->gem_base.vma_node, filp);
 }
 
 static void amdgpu_move_null(struct ttm_buffer_object *bo,

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c

@@ -351,12 +351,12 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
 	}
 }
 
-static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *rbo)
+static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo)
 {
 	int i;
 
-	for (i = 0; i < rbo->placement.num_placement; ++i) {
-		rbo->placements[i].fpfn = 0 >> PAGE_SHIFT;
-		rbo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
+	for (i = 0; i < abo->placement.num_placement; ++i) {
+		abo->placements[i].fpfn = 0 >> PAGE_SHIFT;
+		abo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
 	}
 }

--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c

@@ -2107,7 +2107,7 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
 	struct amdgpu_framebuffer *amdgpu_fb;
 	struct drm_framebuffer *target_fb;
 	struct drm_gem_object *obj;
-	struct amdgpu_bo *rbo;
+	struct amdgpu_bo *abo;
 	uint64_t fb_location, tiling_flags;
 	uint32_t fb_format, fb_pitch_pixels;
 	u32 fb_swap = REG_SET_FIELD(0, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, ENDIAN_NONE);
@@ -2134,23 +2134,23 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
 	 * just update base pointers
 	 */
 	obj = amdgpu_fb->obj;
-	rbo = gem_to_amdgpu_bo(obj);
-	r = amdgpu_bo_reserve(rbo, false);
+	abo = gem_to_amdgpu_bo(obj);
+	r = amdgpu_bo_reserve(abo, false);
 	if (unlikely(r != 0))
 		return r;
 
 	if (atomic) {
-		fb_location = amdgpu_bo_gpu_offset(rbo);
+		fb_location = amdgpu_bo_gpu_offset(abo);
 	} else {
-		r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
 		if (unlikely(r != 0)) {
-			amdgpu_bo_unreserve(rbo);
+			amdgpu_bo_unreserve(abo);
 			return -EINVAL;
 		}
 	}
 
-	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
-	amdgpu_bo_unreserve(rbo);
+	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
+	amdgpu_bo_unreserve(abo);
 
 	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
@@ -2324,12 +2324,12 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
 
 	if (!atomic && fb && fb != crtc->primary->fb) {
 		amdgpu_fb = to_amdgpu_framebuffer(fb);
-		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-		r = amdgpu_bo_reserve(rbo, false);
+		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		r = amdgpu_bo_reserve(abo, false);
 		if (unlikely(r != 0))
 			return r;
-		amdgpu_bo_unpin(rbo);
-		amdgpu_bo_unreserve(rbo);
+		amdgpu_bo_unpin(abo);
+		amdgpu_bo_unreserve(abo);
 	}
 
 	/* Bytes per pixel may have changed */
@@ -2809,16 +2809,16 @@ static void dce_v10_0_crtc_disable(struct drm_crtc *crtc)
 	if (crtc->primary->fb) {
 		int r;
 		struct amdgpu_framebuffer *amdgpu_fb;
-		struct amdgpu_bo *rbo;
+		struct amdgpu_bo *abo;
 
 		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
-		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-		r = amdgpu_bo_reserve(rbo, false);
+		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		r = amdgpu_bo_reserve(abo, false);
 		if (unlikely(r))
-			DRM_ERROR("failed to reserve rbo before unpin\n");
+			DRM_ERROR("failed to reserve abo before unpin\n");
 		else {
-			amdgpu_bo_unpin(rbo);
-			amdgpu_bo_unreserve(rbo);
+			amdgpu_bo_unpin(abo);
+			amdgpu_bo_unreserve(abo);
 		}
 	}
 	/* disable the GRPH */

--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c

@@ -2088,7 +2088,7 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
 	struct amdgpu_framebuffer *amdgpu_fb;
 	struct drm_framebuffer *target_fb;
 	struct drm_gem_object *obj;
-	struct amdgpu_bo *rbo;
+	struct amdgpu_bo *abo;
 	uint64_t fb_location, tiling_flags;
 	uint32_t fb_format, fb_pitch_pixels;
 	u32 fb_swap = REG_SET_FIELD(0, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, ENDIAN_NONE);
@@ -2115,23 +2115,23 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
 	 * just update base pointers
 	 */
 	obj = amdgpu_fb->obj;
-	rbo = gem_to_amdgpu_bo(obj);
-	r = amdgpu_bo_reserve(rbo, false);
+	abo = gem_to_amdgpu_bo(obj);
+	r = amdgpu_bo_reserve(abo, false);
 	if (unlikely(r != 0))
 		return r;
 
 	if (atomic) {
-		fb_location = amdgpu_bo_gpu_offset(rbo);
+		fb_location = amdgpu_bo_gpu_offset(abo);
 	} else {
-		r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
 		if (unlikely(r != 0)) {
-			amdgpu_bo_unreserve(rbo);
+			amdgpu_bo_unreserve(abo);
 			return -EINVAL;
 		}
 	}
 
-	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
-	amdgpu_bo_unreserve(rbo);
+	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
+	amdgpu_bo_unreserve(abo);
 
 	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
@@ -2305,12 +2305,12 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
 
 	if (!atomic && fb && fb != crtc->primary->fb) {
 		amdgpu_fb = to_amdgpu_framebuffer(fb);
-		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-		r = amdgpu_bo_reserve(rbo, false);
+		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		r = amdgpu_bo_reserve(abo, false);
 		if (unlikely(r != 0))
 			return r;
-		amdgpu_bo_unpin(rbo);
-		amdgpu_bo_unreserve(rbo);
+		amdgpu_bo_unpin(abo);
+		amdgpu_bo_unreserve(abo);
 	}
 
 	/* Bytes per pixel may have changed */
@@ -2825,16 +2825,16 @@ static void dce_v11_0_crtc_disable(struct drm_crtc *crtc)
 	if (crtc->primary->fb) {
 		int r;
 		struct amdgpu_framebuffer *amdgpu_fb;
-		struct amdgpu_bo *rbo;
+		struct amdgpu_bo *abo;
 
 		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
-		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-		r = amdgpu_bo_reserve(rbo, false);
+		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		r = amdgpu_bo_reserve(abo, false);
 		if (unlikely(r))
-			DRM_ERROR("failed to reserve rbo before unpin\n");
+			DRM_ERROR("failed to reserve abo before unpin\n");
 		else {
-			amdgpu_bo_unpin(rbo);
-			amdgpu_bo_unreserve(rbo);
+			amdgpu_bo_unpin(abo);
+			amdgpu_bo_unreserve(abo);
 		}
 	}
 	/* disable the GRPH */

--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c

@@ -1533,7 +1533,7 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
 	struct amdgpu_framebuffer *amdgpu_fb;
 	struct drm_framebuffer *target_fb;
 	struct drm_gem_object *obj;
-	struct amdgpu_bo *rbo;
+	struct amdgpu_bo *abo;
 	uint64_t fb_location, tiling_flags;
 	uint32_t fb_format, fb_pitch_pixels, pipe_config;
 	u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE);
@@ -1560,23 +1560,23 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
 	 * just update base pointers
 	 */
 	obj = amdgpu_fb->obj;
-	rbo = gem_to_amdgpu_bo(obj);
-	r = amdgpu_bo_reserve(rbo, false);
+	abo = gem_to_amdgpu_bo(obj);
+	r = amdgpu_bo_reserve(abo, false);
 	if (unlikely(r != 0))
 		return r;
 
 	if (atomic)
-		fb_location = amdgpu_bo_gpu_offset(rbo);
+		fb_location = amdgpu_bo_gpu_offset(abo);
 	else {
-		r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
 		if (unlikely(r != 0)) {
-			amdgpu_bo_unreserve(rbo);
+			amdgpu_bo_unreserve(abo);
 			return -EINVAL;
 		}
 	}
 
-	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
-	amdgpu_bo_unreserve(rbo);
+	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
+	amdgpu_bo_unreserve(abo);
 
 	switch (target_fb->pixel_format) {
 	case DRM_FORMAT_C8:
@@ -1728,12 +1728,12 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
 
 	if (!atomic && fb && fb != crtc->primary->fb) {
 		amdgpu_fb = to_amdgpu_framebuffer(fb);
-		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-		r = amdgpu_bo_reserve(rbo, false);
+		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		r = amdgpu_bo_reserve(abo, false);
 		if (unlikely(r != 0))
 			return r;
-		amdgpu_bo_unpin(rbo);
-		amdgpu_bo_unreserve(rbo);
+		amdgpu_bo_unpin(abo);
+		amdgpu_bo_unreserve(abo);
 	}
 
 	/* Bytes per pixel may have changed */
@@ -2181,16 +2181,16 @@ static void dce_v6_0_crtc_disable(struct drm_crtc *crtc)
 	if (crtc->primary->fb) {
 		int r;
 		struct amdgpu_framebuffer *amdgpu_fb;
-		struct amdgpu_bo *rbo;
+		struct amdgpu_bo *abo;
 
 		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
-		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-		r = amdgpu_bo_reserve(rbo, false);
+		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		r = amdgpu_bo_reserve(abo, false);
 		if (unlikely(r))
-			DRM_ERROR("failed to reserve rbo before unpin\n");
+			DRM_ERROR("failed to reserve abo before unpin\n");
 		else {
-			amdgpu_bo_unpin(rbo);
-			amdgpu_bo_unreserve(rbo);
+			amdgpu_bo_unpin(abo);
+			amdgpu_bo_unreserve(abo);
 		}
 	}
 	/* disable the GRPH */

--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c

@@ -2022,7 +2022,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
 	struct amdgpu_framebuffer *amdgpu_fb;
 	struct drm_framebuffer *target_fb;
 	struct drm_gem_object *obj;
-	struct amdgpu_bo *rbo;
+	struct amdgpu_bo *abo;
 	uint64_t fb_location, tiling_flags;
 	uint32_t fb_format, fb_pitch_pixels;
 	u32 fb_swap = (GRPH_ENDIAN_NONE << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
@@ -2049,23 +2049,23 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
 	 * just update base pointers
 	 */
 	obj = amdgpu_fb->obj;
-	rbo = gem_to_amdgpu_bo(obj);
-	r = amdgpu_bo_reserve(rbo, false);
+	abo = gem_to_amdgpu_bo(obj);
+	r = amdgpu_bo_reserve(abo, false);
 	if (unlikely(r != 0))
 		return r;
 
 	if (atomic) {
-		fb_location = amdgpu_bo_gpu_offset(rbo);
+		fb_location = amdgpu_bo_gpu_offset(abo);
 	} else {
-		r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
 		if (unlikely(r != 0)) {
-			amdgpu_bo_unreserve(rbo);
+			amdgpu_bo_unreserve(abo);
 			return -EINVAL;
 		}
 	}
 
-	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
-	amdgpu_bo_unreserve(rbo);
+	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
+	amdgpu_bo_unreserve(abo);
 
 	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
@@ -2220,12 +2220,12 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
 
 	if (!atomic && fb && fb != crtc->primary->fb) {
 		amdgpu_fb = to_amdgpu_framebuffer(fb);
-		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-		r = amdgpu_bo_reserve(rbo, false);
+		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		r = amdgpu_bo_reserve(abo, false);
 		if (unlikely(r != 0))
 			return r;
-		amdgpu_bo_unpin(rbo);
-		amdgpu_bo_unreserve(rbo);
+		amdgpu_bo_unpin(abo);
+		amdgpu_bo_unreserve(abo);
 	}
 
 	/* Bytes per pixel may have changed */
@@ -2697,16 +2697,16 @@ static void dce_v8_0_crtc_disable(struct drm_crtc *crtc)
 	if (crtc->primary->fb) {
 		int r;
 		struct amdgpu_framebuffer *amdgpu_fb;
-		struct amdgpu_bo *rbo;
+		struct amdgpu_bo *abo;
 
 		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
-		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-		r = amdgpu_bo_reserve(rbo, false);
+		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		r = amdgpu_bo_reserve(abo, false);
 		if (unlikely(r))
-			DRM_ERROR("failed to reserve rbo before unpin\n");
+			DRM_ERROR("failed to reserve abo before unpin\n");
 		else {
-			amdgpu_bo_unpin(rbo);
-			amdgpu_bo_unreserve(rbo);
+			amdgpu_bo_unpin(abo);
+			amdgpu_bo_unreserve(abo);
 		}
 	}
 	/* disable the GRPH */

--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c

@@ -229,16 +229,16 @@ static void dce_virtual_crtc_disable(struct drm_crtc *crtc)
 	if (crtc->primary->fb) {
 		int r;
 		struct amdgpu_framebuffer *amdgpu_fb;
-		struct amdgpu_bo *rbo;
+		struct amdgpu_bo *abo;
 
 		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
-		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-		r = amdgpu_bo_reserve(rbo, false);
+		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		r = amdgpu_bo_reserve(abo, false);
 		if (unlikely(r))
-			DRM_ERROR("failed to reserve rbo before unpin\n");
+			DRM_ERROR("failed to reserve abo before unpin\n");
 		else {
-			amdgpu_bo_unpin(rbo);
-			amdgpu_bo_unreserve(rbo);
+			amdgpu_bo_unpin(abo);
+			amdgpu_bo_unreserve(abo);
 		}
 	}