Merge tag 'amd-drm-fixes-6.9-2024-04-17' of https://gitlab.freedesktop.org/agd5f/linux into drm-fixes

amd-drm-fixes-6.9-2024-04-17:

amdgpu:
- Fix invalid resource->start check
- USB-C DSC fix
- Fix a potential UAF in VA IOCTL
- Fix visible VRAM handling during faults

amdkfd:
- Fix memory leak in create_process failure

radeon:
- Silence UBSAN warnings from variable sized arrays

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240417194959.3716998-1-alexander.deucher@amd.com
Committed by Dave Airlie, 2024-04-19 07:51:31 +10:00
commit 58292f516b
11 changed files with 123 additions and 109 deletions

drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c:

@@ -819,7 +819,7 @@ retry:
 
 	p->bytes_moved += ctx.bytes_moved;
 	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
-	    amdgpu_bo_in_cpu_visible_vram(bo))
+	    amdgpu_res_cpu_visible(adev, bo->tbo.resource))
 		p->bytes_moved_vis += ctx.bytes_moved;
 
 	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {

drivers/gpu/drm/amd/amdgpu/amdgpu_object.c:

@@ -617,8 +617,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
 		return r;
 
 	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
-	    bo->tbo.resource->mem_type == TTM_PL_VRAM &&
-	    amdgpu_bo_in_cpu_visible_vram(bo))
+	    amdgpu_res_cpu_visible(adev, bo->tbo.resource))
 		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
 					     ctx.bytes_moved);
 	else
@@ -1272,23 +1271,25 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict)
 void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
 			  struct amdgpu_mem_stats *stats)
 {
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+	struct ttm_resource *res = bo->tbo.resource;
 	uint64_t size = amdgpu_bo_size(bo);
 	struct drm_gem_object *obj;
 	unsigned int domain;
 	bool shared;
 
 	/* Abort if the BO doesn't currently have a backing store */
-	if (!bo->tbo.resource)
+	if (!res)
 		return;
 
 	obj = &bo->tbo.base;
 	shared = drm_gem_object_is_shared_for_memory_stats(obj);
 
-	domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
+	domain = amdgpu_mem_type_to_domain(res->mem_type);
 	switch (domain) {
 	case AMDGPU_GEM_DOMAIN_VRAM:
 		stats->vram += size;
-		if (amdgpu_bo_in_cpu_visible_vram(bo))
+		if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
 			stats->visible_vram += size;
 		if (shared)
 			stats->vram_shared += size;
@@ -1389,10 +1390,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 	/* Remember that this BO was accessed by the CPU */
 	abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
 
-	if (bo->resource->mem_type != TTM_PL_VRAM)
-		return 0;
-
-	if (amdgpu_bo_in_cpu_visible_vram(abo))
+	if (amdgpu_res_cpu_visible(adev, bo->resource))
 		return 0;
 
 	/* Can't move a pinned BO to visible VRAM */
@@ -1415,7 +1413,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 
 	/* this should never happen */
 	if (bo->resource->mem_type == TTM_PL_VRAM &&
-	    !amdgpu_bo_in_cpu_visible_vram(abo))
+	    !amdgpu_res_cpu_visible(adev, bo->resource))
 		return VM_FAULT_SIGBUS;
 
 	ttm_bo_move_to_lru_tail_unlocked(bo);
@@ -1579,6 +1577,7 @@ uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
  */
 u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
 {
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	struct dma_buf_attachment *attachment;
 	struct dma_buf *dma_buf;
 	const char *placement;
@@ -1587,10 +1586,11 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
 	if (dma_resv_trylock(bo->tbo.base.resv)) {
 		unsigned int domain;
+
 		domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
 		switch (domain) {
 		case AMDGPU_GEM_DOMAIN_VRAM:
-			if (amdgpu_bo_in_cpu_visible_vram(bo))
+			if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
 				placement = "VRAM VISIBLE";
 			else
 				placement = "VRAM";

drivers/gpu/drm/amd/amdgpu/amdgpu_object.h:

@@ -250,28 +250,6 @@ static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
 	return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
 }
 
-/**
- * amdgpu_bo_in_cpu_visible_vram - check if BO is (partly) in visible VRAM
- */
-static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
-{
-	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-	struct amdgpu_res_cursor cursor;
-
-	if (!bo->tbo.resource || bo->tbo.resource->mem_type != TTM_PL_VRAM)
-		return false;
-
-	amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
-	while (cursor.remaining) {
-		if (cursor.start < adev->gmc.visible_vram_size)
-			return true;
-		amdgpu_res_next(&cursor, cursor.size);
-	}
-
-	return false;
-}
-
 /**
  * amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
  */
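
Note: this removal is a semantic tightening, not just a rename. The old inline
helper returned true if ANY chunk of the BO fell inside the CPU-visible window,
while the replacement amdgpu_res_cpu_visible() (added in amdgpu_ttm.c below)
returns true only when EVERY chunk does. A minimal standalone sketch of the two
predicates, using an invented chunk type rather than the real amdgpu_res_cursor
API:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct chunk { uint64_t start, size; };	/* illustrative, not a TTM type */

/* Old semantics: true if any chunk starts inside the visible window. */
static bool any_chunk_visible(const struct chunk *c, size_t n, uint64_t limit)
{
	for (size_t i = 0; i < n; i++)
		if (c[i].start < limit)
			return true;
	return false;
}

/* New semantics: true only if every chunk ends inside the visible window;
 * the >= mirrors the patch's conservative boundary test. */
static bool all_chunks_visible(const struct chunk *c, size_t n, uint64_t limit)
{
	for (size_t i = 0; i < n; i++)
		if (c[i].start + c[i].size >= limit)
			return false;
	return true;
}

int main(void)
{
	/* A BO split across the boundary: the two predicates disagree. */
	struct chunk bo[] = { { 0, 4096 }, { 1ULL << 28, 4096 } };
	uint64_t visible_vram = 1ULL << 26;	/* 64 MiB window, for illustration */

	printf("any=%d all=%d\n",
	       any_chunk_visible(bo, 2, visible_vram),
	       all_chunks_visible(bo, 2, visible_vram));
	return 0;
}

This is why amdgpu_bo_fault_reserve_notify() above can simply return 0 once the
predicate holds: a fully visible resource needs no migration on a CPU fault.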

drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c:

@@ -133,7 +133,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 
 	} else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
 		   !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
-		   amdgpu_bo_in_cpu_visible_vram(abo)) {
+		   amdgpu_res_cpu_visible(adev, bo->resource)) {
 
 		/* Try evicting to the CPU inaccessible part of VRAM
 		 * first, but only set GTT as busy placement, so this
@@ -403,40 +403,55 @@ error:
 	return r;
 }
 
+/**
+ * amdgpu_res_cpu_visible - Check that resource can be accessed by CPU
+ * @adev: amdgpu device
+ * @res: the resource to check
+ *
+ * Returns: true if the full resource is CPU visible, false otherwise.
+ */
+bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
+			    struct ttm_resource *res)
+{
+	struct amdgpu_res_cursor cursor;
+
+	if (!res)
+		return false;
+
+	if (res->mem_type == TTM_PL_SYSTEM || res->mem_type == TTM_PL_TT ||
+	    res->mem_type == AMDGPU_PL_PREEMPT)
+		return true;
+
+	if (res->mem_type != TTM_PL_VRAM)
+		return false;
+
+	amdgpu_res_first(res, 0, res->size, &cursor);
+	while (cursor.remaining) {
+		if ((cursor.start + cursor.size) >= adev->gmc.visible_vram_size)
+			return false;
+		amdgpu_res_next(&cursor, cursor.size);
+	}
+
+	return true;
+}
+
 /*
- * amdgpu_mem_visible - Check that memory can be accessed by ttm_bo_move_memcpy
+ * amdgpu_res_copyable - Check that memory can be accessed by ttm_bo_move_memcpy
  *
  * Called by amdgpu_bo_move()
  */
-static bool amdgpu_mem_visible(struct amdgpu_device *adev,
-			       struct ttm_resource *mem)
+static bool amdgpu_res_copyable(struct amdgpu_device *adev,
+				struct ttm_resource *mem)
 {
-	u64 mem_size = (u64)mem->size;
-	struct amdgpu_res_cursor cursor;
-	u64 end;
-
-	if (mem->mem_type == TTM_PL_SYSTEM ||
-	    mem->mem_type == TTM_PL_TT)
-		return true;
-	if (mem->mem_type != TTM_PL_VRAM)
+	if (!amdgpu_res_cpu_visible(adev, mem))
 		return false;
 
-	amdgpu_res_first(mem, 0, mem_size, &cursor);
-	end = cursor.start + cursor.size;
-	while (cursor.remaining) {
-		amdgpu_res_next(&cursor, cursor.size);
-
-		if (!cursor.remaining)
-			break;
-
-		/* ttm_resource_ioremap only supports contiguous memory */
-		if (end != cursor.start)
-			return false;
-
-		end = cursor.start + cursor.size;
-	}
-
-	return end <= adev->gmc.visible_vram_size;
+	/* ttm_resource_ioremap only supports contiguous memory */
+	if (mem->mem_type == TTM_PL_VRAM &&
+	    !(mem->placement & TTM_PL_FLAG_CONTIGUOUS))
+		return false;
+
+	return true;
 }
 
 /*
@@ -529,8 +544,8 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
 
 	if (r) {
 		/* Check that all memory is CPU accessible */
-		if (!amdgpu_mem_visible(adev, old_mem) ||
-		    !amdgpu_mem_visible(adev, new_mem)) {
+		if (!amdgpu_res_copyable(adev, old_mem) ||
+		    !amdgpu_res_copyable(adev, new_mem)) {
 			pr_err("Move buffer fallback to memcpy unavailable\n");
 			return r;
 		}
@@ -557,7 +572,6 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
 			     struct ttm_resource *mem)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
-	size_t bus_size = (size_t)mem->size;
 
 	switch (mem->mem_type) {
 	case TTM_PL_SYSTEM:
@@ -568,9 +582,6 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
 		break;
 	case TTM_PL_VRAM:
 		mem->bus.offset = mem->start << PAGE_SHIFT;
-		/* check if it's visible */
-		if ((mem->bus.offset + bus_size) > adev->gmc.visible_vram_size)
-			return -EINVAL;
 
 		if (adev->mman.aper_base_kaddr &&
 		    mem->placement & TTM_PL_FLAG_CONTIGUOUS)
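
Note: amdgpu_res_copyable() keeps only the requirement that actually matters to
the memcpy fallback: ttm_resource_ioremap() needs one gap-free span. The deleted
cursor loop detected gaps by hand; the rewrite derives the same guarantee from
the TTM_PL_FLAG_CONTIGUOUS placement flag, since a resource allocated with that
flag is gap-free by construction. A self-contained sketch of the gap test the
old loop performed (invented chunk type, not TTM code):

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct chunk { uint64_t start, size; };	/* illustrative, not a TTM type */

/* What the removed loop in amdgpu_mem_visible() effectively computed:
 * each chunk must begin exactly where the previous one ended. */
static bool chunks_contiguous(const struct chunk *c, size_t n)
{
	for (size_t i = 1; i < n; i++)
		if (c[i - 1].start + c[i - 1].size != c[i].start)
			return false;
	return true;
}

int main(void)
{
	struct chunk contig[] = { { 0, 4096 }, { 4096, 8192 } };
	struct chunk gap[]    = { { 0, 4096 }, { 8192, 4096 } };

	assert(chunks_contiguous(contig, 2));
	assert(!chunks_contiguous(gap, 2));
	return 0;
}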

drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h:

@@ -139,6 +139,9 @@ int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
 int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
 				      uint64_t start);
 
+bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
+			    struct ttm_resource *res);
+
 int amdgpu_ttm_init(struct amdgpu_device *adev);
 void amdgpu_ttm_fini(struct amdgpu_device *adev);
 void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev,

drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c:

@@ -1613,6 +1613,37 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
 	trace_amdgpu_vm_bo_map(bo_va, mapping);
 }
 
+/* Validate operation parameters to prevent potential abuse */
+static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev,
+				       struct amdgpu_bo *bo,
+				       uint64_t saddr,
+				       uint64_t offset,
+				       uint64_t size)
+{
+	uint64_t tmp, lpfn;
+
+	if (saddr & AMDGPU_GPU_PAGE_MASK
+	    || offset & AMDGPU_GPU_PAGE_MASK
+	    || size & AMDGPU_GPU_PAGE_MASK)
+		return -EINVAL;
+
+	if (check_add_overflow(saddr, size, &tmp)
+	    || check_add_overflow(offset, size, &tmp)
+	    || size == 0 /* which also leads to end < begin */)
+		return -EINVAL;
+
+	/* make sure object fit at this offset */
+	if (bo && offset + size > amdgpu_bo_size(bo))
+		return -EINVAL;
+
+	/* Ensure last pfn not exceed max_pfn */
+	lpfn = (saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT;
+	if (lpfn >= adev->vm_manager.max_pfn)
+		return -EINVAL;
+
+	return 0;
+}
+
 /**
  * amdgpu_vm_bo_map - map bo inside a vm
  *
@@ -1639,21 +1670,14 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 	struct amdgpu_bo *bo = bo_va->base.bo;
 	struct amdgpu_vm *vm = bo_va->base.vm;
 	uint64_t eaddr;
+	int r;
 
-	/* validate the parameters */
-	if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
-		return -EINVAL;
-	if (saddr + size <= saddr || offset + size <= offset)
-		return -EINVAL;
-
-	/* make sure object fit at this offset */
-	eaddr = saddr + size - 1;
-	if ((bo && offset + size > amdgpu_bo_size(bo)) ||
-	    (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
-		return -EINVAL;
+	r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
+	if (r)
+		return r;
 
 	saddr /= AMDGPU_GPU_PAGE_SIZE;
-	eaddr /= AMDGPU_GPU_PAGE_SIZE;
+	eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
 
 	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
 	if (tmp) {
@@ -1706,17 +1730,9 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
 	uint64_t eaddr;
 	int r;
 
-	/* validate the parameters */
-	if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
-		return -EINVAL;
-	if (saddr + size <= saddr || offset + size <= offset)
-		return -EINVAL;
-
-	/* make sure object fit at this offset */
-	eaddr = saddr + size - 1;
-	if ((bo && offset + size > amdgpu_bo_size(bo)) ||
-	    (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
-		return -EINVAL;
+	r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
+	if (r)
+		return r;
 
 	/* Allocate all the needed memory */
 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
@@ -1730,7 +1746,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
 	}
 
 	saddr /= AMDGPU_GPU_PAGE_SIZE;
-	eaddr /= AMDGPU_GPU_PAGE_SIZE;
+	eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
 
 	mapping->start = saddr;
 	mapping->last = eaddr;
@@ -1817,10 +1833,14 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
 	struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
 	LIST_HEAD(removed);
 	uint64_t eaddr;
+	int r;
+
+	r = amdgpu_vm_verify_parameters(adev, NULL, saddr, 0, size);
+	if (r)
+		return r;
 
-	eaddr = saddr + size - 1;
 	saddr /= AMDGPU_GPU_PAGE_SIZE;
-	eaddr /= AMDGPU_GPU_PAGE_SIZE;
+	eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
 
 	/* Allocate all the needed memory */
 	before = kzalloc(sizeof(*before), GFP_KERNEL);
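
Note: the essence of the UAF fix is that every range parameter is now checked
for wrap-around before any arithmetic uses it, and eaddr is computed only after
validation succeeds. A userspace sketch of the same pattern, built on
__builtin_add_overflow (the compiler primitive behind the kernel's
check_add_overflow()); the page mask, helper name and constants are
illustrative, not the driver's:

#include <stdint.h>
#include <stdio.h>

#define GPU_PAGE_MASK 0xFFFULL	/* invented 4 KiB GPU page mask */

static int verify_params(uint64_t saddr, uint64_t offset, uint64_t size,
			 uint64_t bo_size, uint64_t max_pfn)
{
	uint64_t tmp;

	/* page alignment (the patch checks each value separately) */
	if ((saddr | offset | size) & GPU_PAGE_MASK)
		return -1;

	/* reject wrap-around before anyone computes saddr + size */
	if (__builtin_add_overflow(saddr, size, &tmp) ||
	    __builtin_add_overflow(offset, size, &tmp) ||
	    size == 0)	/* size == 0 would make end < begin */
		return -1;

	if (offset + size > bo_size)	/* object must fit at this offset */
		return -1;

	if (((saddr + size - 1) >> 12) >= max_pfn)	/* last pfn in range */
		return -1;

	return 0;
}

int main(void)
{
	/* The old code computed saddr + size first and range-checked after,
	 * so a page-aligned size near UINT64_MAX could slip past validation. */
	printf("%d\n", verify_params(0x1000, 0, UINT64_MAX & ~0xFFFULL,
				     0x10000, 1ULL << 36));	/* -1: overflow */
	printf("%d\n", verify_params(0x1000, 0, 0x2000,
				     0x10000, 1ULL << 36));	/* 0: accepted */
	return 0;
}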

drivers/gpu/drm/amd/amdkfd/kfd_process.c:

@@ -819,9 +819,9 @@ struct kfd_process *kfd_create_process(struct task_struct *thread)
 	mutex_lock(&kfd_processes_mutex);
 
 	if (kfd_is_locked()) {
-		mutex_unlock(&kfd_processes_mutex);
 		pr_debug("KFD is locked! Cannot create process");
-		return ERR_PTR(-EINVAL);
+		process = ERR_PTR(-EINVAL);
+		goto out;
 	}
 
 	/* A prior open of /dev/kfd could have already created the process. */
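
Note: the fix re-routes the early "KFD is locked" bail-out through the
function's existing common exit instead of returning directly, so the unlock
and the failure-path cleanup that caused the leak live in one place. A generic
pthread sketch of that single-exit shape (names invented, not the KFD code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t processes_mutex = PTHREAD_MUTEX_INITIALIZER;
static int kfd_locked = 1;	/* stand-in for kfd_is_locked() */

static int create_process(void)
{
	int err = 0;

	pthread_mutex_lock(&processes_mutex);

	if (kfd_locked) {
		/* previously: explicit unlock + return, a second exit path
		 * that silently missed cleanup added to the main one */
		err = -1;
		goto out;
	}

	/* ... actual process creation would go here ... */

out:
	pthread_mutex_unlock(&processes_mutex);
	return err;
}

int main(void)
{
	printf("create_process() = %d\n", create_process());
	return 0;
}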

drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c:

@@ -248,14 +248,12 @@ void dcn32_link_encoder_construct(
 	enc10->base.hpd_source = init_data->hpd_source;
 	enc10->base.connector = init_data->connector;
 
-	if (enc10->base.connector.id == CONNECTOR_ID_USBC)
-		enc10->base.features.flags.bits.DP_IS_USB_C = 1;
-
 	enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
 
 	enc10->base.features = *enc_features;
+	if (enc10->base.connector.id == CONNECTOR_ID_USBC)
+		enc10->base.features.flags.bits.DP_IS_USB_C = 1;
 
 	enc10->base.transmitter = init_data->transmitter;
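
Note: both encoder fixes are ordering bugs around a wholesale struct
assignment: on DCN32 the DP_IS_USB_C bit was set before
enc10->base.features = *enc_features overwrote it, which is what disabled DSC
on USB-C links; the DCN35 hunks below move the same bit ahead of the code that
consumes it. A toy reproduction of the clobbering (made-up types, only the
ordering matches the patch):

#include <stdio.h>

struct features { unsigned int dp_is_usb_c : 1; unsigned int dsc : 1; };

int main(void)
{
	struct features defaults = { .dsc = 1 };	/* DP_IS_USB_C not set */
	struct features f = { 0 };

	f.dp_is_usb_c = 1;	/* set first... */
	f = defaults;		/* ...then lost in the wholesale copy */
	printf("before fix: dp_is_usb_c=%u\n", f.dp_is_usb_c);	/* 0 */

	f = defaults;		/* fixed order: copy the defaults first, */
	f.dp_is_usb_c = 1;	/* then set the per-connector bit */
	printf("after fix:  dp_is_usb_c=%u\n", f.dp_is_usb_c);	/* 1 */
	return 0;
}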

drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c:

@@ -184,6 +184,8 @@ void dcn35_link_encoder_construct(
 	enc10->base.hpd_source = init_data->hpd_source;
 	enc10->base.connector = init_data->connector;
+	if (enc10->base.connector.id == CONNECTOR_ID_USBC)
+		enc10->base.features.flags.bits.DP_IS_USB_C = 1;
 
 	enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
 
@@ -238,8 +240,6 @@ void dcn35_link_encoder_construct(
 	}
 
 	enc10->base.features.flags.bits.HDMI_6GB_EN = 1;
-	if (enc10->base.connector.id == CONNECTOR_ID_USBC)
-		enc10->base.features.flags.bits.DP_IS_USB_C = 1;
 
 	if (bp_funcs->get_connector_speed_cap_info)
 		result = bp_funcs->get_connector_speed_cap_info(enc10->base.ctx->dc_bios,

drivers/gpu/drm/radeon/pptable.h:

@@ -424,7 +424,7 @@ typedef struct _ATOM_PPLIB_SUMO_CLOCK_INFO{
 typedef struct _ATOM_PPLIB_STATE_V2
 {
       //number of valid dpm levels in this state; Driver uses it to calculate the whole
-      //size of the state: sizeof(ATOM_PPLIB_STATE_V2) + (ucNumDPMLevels - 1) * sizeof(UCHAR)
+      //size of the state: struct_size(ATOM_PPLIB_STATE_V2, clockInfoIndex, ucNumDPMLevels)
       UCHAR ucNumDPMLevels;
 
       //a index to the array of nonClockInfos
@@ -432,14 +432,14 @@ typedef struct _ATOM_PPLIB_STATE_V2
     /**
     * Driver will read the first ucNumDPMLevels in this array
     */
-    UCHAR clockInfoIndex[1];
+    UCHAR clockInfoIndex[] __counted_by(ucNumDPMLevels);
 } ATOM_PPLIB_STATE_V2;
 
 typedef struct _StateArray{
     //how many states we have
     UCHAR ucNumEntries;
 
-    ATOM_PPLIB_STATE_V2 states[1];
+    ATOM_PPLIB_STATE_V2 states[] __counted_by(ucNumEntries);
 }StateArray;
 
@@ -450,7 +450,7 @@ typedef struct _ClockInfoArray{
     //sizeof(ATOM_PPLIB_CLOCK_INFO)
     UCHAR ucEntrySize;
 
-    UCHAR clockInfo[1];
+    UCHAR clockInfo[] __counted_by(ucNumEntries);
 }ClockInfoArray;
 
 typedef struct _NonClockInfoArray{
@@ -460,7 +460,7 @@ typedef struct _NonClockInfoArray{
     //sizeof(ATOM_PPLIB_NONCLOCK_INFO)
     UCHAR ucEntrySize;
 
-    ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1];
+    ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[] __counted_by(ucNumEntries);
 }NonClockInfoArray;
 
 typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
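
Note: converting the [1]-sized trailing arrays to C99 flexible array members is
what silences UBSAN's array-bounds warnings, and __counted_by() lets a recent
compiler bounds-check accesses against the runtime count. A userspace sketch of
the pattern (illustrative struct, with a fallback macro so it builds without
the kernel's compiler attributes):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#ifndef __counted_by
#define __counted_by(m)	/* expands to a bounds annotation in the kernel */
#endif

struct state_v2 {
	uint8_t num_levels;
	uint8_t clock_info_index[] __counted_by(num_levels);
};

int main(void)
{
	uint8_t n = 4;
	/* the kernel would write struct_size(s, clock_info_index, n); the old
	 * [1]-sized array needed an error-prone "+ (n - 1)" correction here */
	struct state_v2 *s = malloc(sizeof(*s) + n * sizeof(s->clock_info_index[0]));

	if (!s)
		return 1;
	s->num_levels = n;
	for (uint8_t i = 0; i < n; i++)
		s->clock_info_index[i] = i;	/* now in bounds for UBSAN */
	printf("%u\n", s->clock_info_index[n - 1]);
	free(s);
	return 0;
}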

drivers/gpu/drm/radeon/radeon_atombios.c:

@@ -923,8 +923,12 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
 		max_device = ATOM_MAX_SUPPORTED_DEVICE_INFO;
 
 	for (i = 0; i < max_device; i++) {
-		ATOM_CONNECTOR_INFO_I2C ci =
-			supported_devices->info.asConnInfo[i];
+		ATOM_CONNECTOR_INFO_I2C ci;
+
+		if (frev > 1)
+			ci = supported_devices->info_2d1.asConnInfo[i];
+		else
+			ci = supported_devices->info.asConnInfo[i];
 
 		bios_connectors[i].valid = false;
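
Note: the loop previously always read the frev 1 union member, so on frev > 1
tables the index could run past that member's shorter fixed array, which is
exactly the access pattern UBSAN flags once the arrays become properly sized.
A compact sketch of dispatching on the table revision (layouts and array sizes
invented for illustration):

#include <stdint.h>
#include <stdio.h>

struct conn_info { uint16_t device_tag; };

union supported_devices {
	struct { struct conn_info asConnInfo[8]; } info;	/* frev == 1 */
	struct { struct conn_info asConnInfo[16]; } info_2d1;	/* frev > 1 */
};

static struct conn_info get_conn(const union supported_devices *sd,
				 int frev, int i)
{
	/* pick the member whose declared bounds match the table revision */
	return frev > 1 ? sd->info_2d1.asConnInfo[i] : sd->info.asConnInfo[i];
}

int main(void)
{
	union supported_devices sd = { 0 };

	/* index 12 only exists in the larger layout; indexing the frev 1
	 * member with it is what tripped UBSAN's bounds checks */
	printf("%u\n", get_conn(&sd, 2, 12).device_tag);
	return 0;
}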