drm/amdgpu/amdgpu: use "*" adjacent to data name

When declaring pointer data, the "*" symbol should be placed adjacent to
the data name, as per the coding standard. This resolves the following
issues reported by the checkpatch script:
	ERROR: "foo *   bar" should be "foo *bar"
	ERROR: "foo * bar" should be "foo *bar"
	ERROR: "foo*            bar" should be "foo *bar"
	ERROR: "(foo*)" should be "(foo *)"

Signed-off-by: Deepak R Varma <mh12gx2825@gmail.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit c4c5ae67d1 (parent 8e607d7e27)
Author: Deepak R Varma, 2020-11-03 01:07:12 +05:30, committed by Alex Deucher
9 changed files with 21 additions and 21 deletions
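
For readers who want a standalone illustration of the rule, the following minimal C sketch contrasts the accepted and rejected forms; the struct and function names are invented for this example and do not appear in the driver:

#include <stdint.h>

/*
 * Illustrative only: these declarations mirror the checkpatch rule and are
 * not taken from the amdgpu driver.
 */
struct sample_buf {
        uint8_t *data;          /* "foo *bar"  -- accepted                */
        /* uint8_t* data;          "foo* bar"  -- flagged by checkpatch   */
        /* uint8_t * data;         "foo * bar" -- flagged by checkpatch   */
        uint32_t len;
};

static uint32_t first_word(const struct sample_buf *buf)
{
        /* Casts follow the same rule: "(foo *)" rather than "(foo*)". */
        const uint32_t *words = (const uint32_t *)buf->data;

        return words[0];
}

int main(void)
{
        uint32_t raw[1] = { 42 };
        struct sample_buf buf = {
                .data = (uint8_t *)raw,
                .len = sizeof(raw),
        };

        return first_word(&buf) == 42 ? 0 : 1;
}

The errors quoted in the commit message can be regenerated for a single source file with scripts/checkpatch.pl -f <path>, which is presumably how they were collected for this change.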

drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c

@@ -1401,7 +1401,7 @@ static ATOM_VOLTAGE_OBJECT_V3 *amdgpu_atombios_lookup_voltage_object_v3(ATOM_VOL
 {
         u32 size = le16_to_cpu(v3->sHeader.usStructureSize);
         u32 offset = offsetof(ATOM_VOLTAGE_OBJECT_INFO_V3_1, asVoltageObj[0]);
-        u8 *start = (u8*)v3;
+        u8 *start = (u8 *)v3;
 
         while (offset < size) {
                 ATOM_VOLTAGE_OBJECT_V3 *vo = (ATOM_VOLTAGE_OBJECT_V3 *)(start + offset);

drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c

@@ -70,7 +70,7 @@ int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
         struct atom_context *ctx = adev->mode_info.atom_context;
         int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
                                                 vram_usagebyfirmware);
-        struct vram_usagebyfirmware_v2_1 * firmware_usage;
+        struct vram_usagebyfirmware_v2_1 *firmware_usage;
         uint32_t start_addr, size;
         uint16_t data_offset;
         int usage_bytes = 0;

drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c

@@ -1461,7 +1461,7 @@ int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
                 dma_fence_put(fence);
                 if (r)
                         return r;
-                r = drm_syncobj_get_fd(syncobj, (int*)&info->out.handle);
+                r = drm_syncobj_get_fd(syncobj, (int *)&info->out.handle);
                 drm_syncobj_put(syncobj);
                 return r;

drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c

@@ -450,7 +450,7 @@ int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
 void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
                           struct drm_sched_entity *entity,
-                          struct dma_fence *fence, uint64_t* handle)
+                          struct dma_fence *fence, uint64_t *handle)
 {
         struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
         uint64_t seq = centity->sequence;

drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c

@@ -838,7 +838,7 @@ int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
         struct ta_xgmi_shared_memory *xgmi_cmd;
         int ret;
 
-        xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
+        xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
         memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
 
         xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
@@ -858,7 +858,7 @@ int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
         struct ta_xgmi_shared_memory *xgmi_cmd;
         int ret;
 
-        xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
+        xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
         memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
 
         xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
@@ -886,7 +886,7 @@ int psp_xgmi_get_topology_info(struct psp_context *psp,
         if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
                 return -EINVAL;
 
-        xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
+        xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
         memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
 
         /* Fill in the shared memory with topology information as input */
@@ -930,7 +930,7 @@ int psp_xgmi_set_topology_info(struct psp_context *psp,
         if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
                 return -EINVAL;
 
-        xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
+        xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
         memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
 
         topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
@@ -994,7 +994,7 @@ static int psp_ras_load(struct psp_context *psp)
         ret = psp_cmd_submit_buf(psp, NULL, cmd,
                                  psp->fence_buf_mc_addr);
 
-        ras_cmd = (struct ta_ras_shared_memory*)psp->ras.ras_shared_buf;
+        ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
 
         if (!ret) {
                 psp->ras.session_id = cmd->resp.session_id;
@@ -1916,7 +1916,7 @@ static int psp_execute_np_fw_load(struct psp_context *psp,
 static int psp_load_smu_fw(struct psp_context *psp)
 {
         int ret;
-        struct amdgpu_device* adev = psp->adev;
+        struct amdgpu_device *adev = psp->adev;
         struct amdgpu_firmware_info *ucode =
                         &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
         struct amdgpu_ras *ras = psp->ras.ras;
@@ -1982,7 +1982,7 @@ static int psp_np_fw_load(struct psp_context *psp)
 {
         int i, ret;
         struct amdgpu_firmware_info *ucode;
-        struct amdgpu_device* adev = psp->adev;
+        struct amdgpu_device *adev = psp->adev;
 
         if (psp->autoload_supported &&
             !psp->pmfw_centralized_cstate_management) {

drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c

@@ -396,7 +396,7 @@ static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
                         return result;
 
                 value = ring->ring[(*pos - 12)/4];
-                r = put_user(value, (uint32_t*)buf);
+                r = put_user(value, (uint32_t *)buf);
                 if (r)
                         return r;
                 buf += 4;

drivers/gpu/drm/amd/amdgpu/amdgpu_test.c

@@ -157,10 +157,10 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
                                   i, *vram_start, gart_start,
                                   (unsigned long long)
                                   (gart_addr - adev->gmc.gart_start +
-                                   (void*)gart_start - gtt_map),
+                                   (void *)gart_start - gtt_map),
                                   (unsigned long long)
                                   (vram_addr - adev->gmc.vram_start +
-                                   (void*)gart_start - gtt_map));
+                                   (void *)gart_start - gtt_map));
                         amdgpu_bo_kunmap(vram_obj);
                         goto out_lclean_unpin;
                 }
@@ -203,10 +203,10 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
                                   i, *gart_start, vram_start,
                                   (unsigned long long)
                                   (vram_addr - adev->gmc.vram_start +
-                                   (void*)vram_start - vram_map),
+                                   (void *)vram_start - vram_map),
                                   (unsigned long long)
                                   (gart_addr - adev->gmc.gart_start +
-                                   (void*)vram_start - vram_map));
+                                   (void *)vram_start - vram_map));
                         amdgpu_bo_kunmap(gtt_obj[i]);
                         goto out_lclean_unpin;
                 }

drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c

@@ -1115,7 +1115,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
                                    struct ttm_resource *bo_mem)
 {
         struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
-        struct amdgpu_ttm_tt *gtt = (void*)ttm;
+        struct amdgpu_ttm_tt *gtt = (void *)ttm;
         uint64_t flags;
         int r = 0;
@@ -1167,7 +1167,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
 {
         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
         struct ttm_operation_ctx ctx = { false, false };
-        struct amdgpu_ttm_tt *gtt = (void*)bo->ttm;
+        struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
         struct ttm_resource tmp;
         struct ttm_placement placement;
         struct ttm_place placements;
@@ -1427,7 +1427,7 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
                 return -ENOMEM;
         }
 
-        gtt = (void*)bo->ttm;
+        gtt = (void *)bo->ttm;
         gtt->userptr = addr;
         gtt->userflags = flags;

drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c

@@ -588,8 +588,8 @@ static int amdgpu_ucode_patch_jt(struct amdgpu_firmware_info *ucode,
 {
         const struct gfx_firmware_header_v1_0 *header = NULL;
         const struct common_firmware_header *comm_hdr = NULL;
-        uint8_t* src_addr = NULL;
-        uint8_t* dst_addr = NULL;
+        uint8_t *src_addr = NULL;
+        uint8_t *dst_addr = NULL;
 
         if (NULL == ucode->fw)
                 return 0;