drm/amdgpu: remove mclk_lock

Not needed any more: the semaphore was only write-locked while reprogramming the memory clock and read-locked around buffer creation and CPU page faults, so drop it along with the fault-handler wrapper that existed solely to take it.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
commit e176fe176d (parent 8dacc127fc)
Christian König, 2015-05-27 10:22:47 +02:00; committed by Alex Deucher
5 changed files with 4 additions and 43 deletions
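For context, mclk_lock implemented the classic rw_semaphore pattern: any number of readers (buffer creation, CPU page faults) could touch VRAM concurrently, while the single writer (memory-clock reprogramming) excluded them all. A minimal sketch of that pattern, with hypothetical example_* names, using only the stock <linux/rwsem.h> API:

#include <linux/rwsem.h>

static DECLARE_RWSEM(example_mclk_lock);	/* or init_rwsem() at init time */

/* Read side (hypothetical): paths that touch VRAM and must not observe a
 * memory-clock change mid-access. Many readers may hold the lock at once. */
static void example_vram_access(void)
{
	down_read(&example_mclk_lock);
	/* ... access VRAM ... */
	up_read(&example_mclk_lock);
}

/* Write side (hypothetical): down_write() drains in-flight readers and
 * stalls new ones until up_write(), so the clock switch runs exclusively. */
static void example_reprogram_mclk(void)
{
	down_write(&example_mclk_lock);
	/* ... switch the memory clock ... */
	up_write(&example_mclk_lock);
}

This writer-excludes-readers behaviour is exactly why the diff below takes the read side around ttm_bo_init() and the CPU fault handler, and the write side around clock reprogramming.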

drivers/gpu/drm/amd/amdgpu/amdgpu.h

@@ -1558,8 +1558,6 @@ struct amdgpu_dpm {
 struct amdgpu_pm {
 	struct mutex		mutex;
-	/* write locked while reprogramming mclk */
-	struct rw_semaphore	mclk_lock;
 	u32			current_sclk;
 	u32			current_mclk;
 	u32			default_sclk;

drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

@@ -1401,7 +1401,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	mutex_init(&adev->gfx.gpu_clock_mutex);
 	mutex_init(&adev->srbm_mutex);
 	mutex_init(&adev->grbm_idx_mutex);
-	init_rwsem(&adev->pm.mclk_lock);
 	init_rwsem(&adev->exclusive_lock);
 	mutex_init(&adev->mn_lock);
 	hash_init(adev->mn_hash);

drivers/gpu/drm/amd/amdgpu/amdgpu_object.c

@@ -272,11 +272,9 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
 	bo->flags = flags;
 	amdgpu_fill_placement_to_bo(bo, placement);
 	/* Kernel allocation are uninterruptible */
-	down_read(&adev->pm.mclk_lock);
 	r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
 			&bo->placement, page_align, !kernel, NULL,
 			acc_size, sg, NULL, &amdgpu_ttm_bo_destroy);
-	up_read(&adev->pm.mclk_lock);
 	if (unlikely(r != 0)) {
 		return r;
 	}

drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c

@@ -581,7 +581,6 @@ force:
 	}
 	mutex_lock(&adev->ddev->struct_mutex);
-	down_write(&adev->pm.mclk_lock);
 	mutex_lock(&adev->ring_lock);
 	/* update whether vce is active */
@@ -629,7 +628,6 @@ force:
 done:
 	mutex_unlock(&adev->ring_lock);
-	up_write(&adev->pm.mclk_lock);
 	mutex_unlock(&adev->ddev->struct_mutex);
 }
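The two amdgpu_pm.c hunks also show the write-side lock nesting this patch shrinks. A hedged sketch of the pre-patch ordering, reconstructed only from the lines above (example_compute_clocks is a placeholder name, not the real function body):

/* Sketch of the pre-patch critical section, assuming only what the
 * hunks above show. */
static void example_compute_clocks(struct amdgpu_device *adev)
{
	mutex_lock(&adev->ddev->struct_mutex);	/* outermost */
	down_write(&adev->pm.mclk_lock);	/* dropped by this patch */
	mutex_lock(&adev->ring_lock);		/* innermost */

	/* ... update VCE state, reprogram clocks ... */

	mutex_unlock(&adev->ring_lock);
	up_write(&adev->pm.mclk_lock);
	mutex_unlock(&adev->ddev->struct_mutex);
}

Acquiring and releasing in strict reverse order keeps a single global lock ordering and rules out ABBA deadlocks; removing the middle lock leaves that ordering intact.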

drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c

@@ -966,52 +966,20 @@ void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size)
 	man->size = size >> PAGE_SHIFT;
 }
 
-static struct vm_operations_struct amdgpu_ttm_vm_ops;
-static const struct vm_operations_struct *ttm_vm_ops = NULL;
-
-static int amdgpu_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
-	struct ttm_buffer_object *bo;
-	struct amdgpu_device *adev;
-	int r;
-
-	bo = (struct ttm_buffer_object *)vma->vm_private_data;
-	if (bo == NULL) {
-		return VM_FAULT_NOPAGE;
-	}
-	adev = amdgpu_get_adev(bo->bdev);
-	down_read(&adev->pm.mclk_lock);
-	r = ttm_vm_ops->fault(vma, vmf);
-	up_read(&adev->pm.mclk_lock);
-	return r;
-}
-
 int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
 {
 	struct drm_file *file_priv;
 	struct amdgpu_device *adev;
-	int r;
 
-	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
+	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
 		return -EINVAL;
-	}
 
 	file_priv = filp->private_data;
 	adev = file_priv->minor->dev->dev_private;
-	if (adev == NULL) {
+	if (adev == NULL)
 		return -EINVAL;
-	}
 
-	r = ttm_bo_mmap(filp, vma, &adev->mman.bdev);
-	if (unlikely(r != 0)) {
-		return r;
-	}
-
-	if (unlikely(ttm_vm_ops == NULL)) {
-		ttm_vm_ops = vma->vm_ops;
-		amdgpu_ttm_vm_ops = *ttm_vm_ops;
-		amdgpu_ttm_vm_ops.fault = &amdgpu_ttm_fault;
-	}
-
-	vma->vm_ops = &amdgpu_ttm_vm_ops;
-
-	return 0;
+	return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
 }
 
 int amdgpu_copy_buffer(struct amdgpu_ring *ring,
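The deleted amdgpu_ttm.c code is an instance of vm_operations_struct interposition: after ttm_bo_mmap() installs TTM's vm_ops on the VMA, the driver copies that table once, overrides only .fault with a wrapper that brackets the real handler with the semaphore, and points the VMA at the copy. A hedged sketch of just that technique, with hypothetical names and the old (vma, vmf) fault signature used by the removed code:

#include <linux/mm.h>

static struct vm_operations_struct wrapped_vm_ops;
static const struct vm_operations_struct *orig_vm_ops;	/* TTM's table */

/* Hypothetical wrapper: bracket the original fault handler, as the
 * removed amdgpu_ttm_fault() did with mclk_lock. */
static int wrapped_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int r;

	/* down_read(&adev->pm.mclk_lock);  -- what the removed code did */
	r = orig_vm_ops->fault(vma, vmf);
	/* up_read(&adev->pm.mclk_lock); */
	return r;
}

/* One-time interposition, called after ttm_bo_mmap() has set vma->vm_ops. */
static void interpose_vm_ops(struct vm_area_struct *vma)
{
	if (orig_vm_ops == NULL) {
		orig_vm_ops = vma->vm_ops;		/* remember original */
		wrapped_vm_ops = *orig_vm_ops;		/* shallow copy      */
		wrapped_vm_ops.fault = &wrapped_fault;	/* override one hook */
	}
	vma->vm_ops = &wrapped_vm_ops;
}

Once the lock is gone the wrapper has nothing left to do, which is why amdgpu_mmap() now simply tail-calls ttm_bo_mmap() and the whole interposition machinery can be deleted.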