Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
amd-iommu: handle exclusion ranges and unity mappings in alloc_new_range
This patch makes sure no reserved addresses are allocated in a dma_ops domain when the aperture is increased dynamically.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
parent 9cabe89b99
commit 00cd122ae5

1 changed file with 60 additions and 11 deletions
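For context before the diff: a dma_ops domain hands out DMA addresses from a bitmap-backed aperture, one bit per page. Any address a device must never receive, either because it falls into the IOMMU's exclusion range or because it is already taken by a unity mapping, has to be pre-marked as allocated in that bitmap. The following is a minimal, self-contained sketch of that reservation primitive, not the kernel code; the toy_* names are invented for illustration.

#include <limits.h>

#define TOY_PAGE_SHIFT     12
#define TOY_APERTURE_PAGES 512  /* one small toy aperture */

/* Toy model of one aperture range: a bitmap with one bit per page. */
struct toy_aperture {
        unsigned long bitmap[TOY_APERTURE_PAGES / (sizeof(unsigned long) * CHAR_BIT)];
};

static void toy_set_bit(struct toy_aperture *ap, unsigned long page)
{
        ap->bitmap[page / (sizeof(unsigned long) * CHAR_BIT)] |=
                1UL << (page % (sizeof(unsigned long) * CHAR_BIT));
}

/*
 * Mark [start_page, start_page + pages) as allocated so the address
 * allocator can never hand these pages out to a device.
 */
static void toy_reserve_pages(struct toy_aperture *ap,
                              unsigned long start_page, unsigned int pages)
{
        for (unsigned int i = 0; i < pages; i++) {
                unsigned long page = start_page + i;

                if (page < TOY_APERTURE_PAGES)
                        toy_set_bit(ap, page);
        }
}

With such a primitive, reserving an exclusion range reduces to a single call, e.g. toy_reserve_pages(&ap, exclusion_start >> TOY_PAGE_SHIFT, exclusion_pages).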
arch/x86/kernel/amd_iommu.c

@@ -58,6 +58,9 @@ static struct dma_ops_domain *find_protection_domain(u16 devid);
 static u64* alloc_pte(struct protection_domain *dom,
                       unsigned long address, u64
                       **pte_page, gfp_t gfp);
+static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
+                                      unsigned long start_page,
+                                      unsigned int pages);
 
 #ifdef CONFIG_AMD_IOMMU_STATS
 
@@ -620,15 +623,43 @@ static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom,
  * called with domain->lock held
  */
 
+/*
+ * This function checks if there is a PTE for a given dma address. If
+ * there is one, it returns the pointer to it.
+ */
+static u64* fetch_pte(struct protection_domain *domain,
+                      unsigned long address)
+{
+        u64 *pte;
+
+        pte = &domain->pt_root[IOMMU_PTE_L2_INDEX(address)];
+
+        if (!IOMMU_PTE_PRESENT(*pte))
+                return NULL;
+
+        pte = IOMMU_PTE_PAGE(*pte);
+        pte = &pte[IOMMU_PTE_L1_INDEX(address)];
+
+        if (!IOMMU_PTE_PRESENT(*pte))
+                return NULL;
+
+        pte = IOMMU_PTE_PAGE(*pte);
+        pte = &pte[IOMMU_PTE_L0_INDEX(address)];
+
+        return pte;
+}
+
 /*
  * This function is used to add a new aperture range to an existing
  * aperture in case of dma_ops domain allocation or address allocation
  * failure.
  */
-static int alloc_new_range(struct dma_ops_domain *dma_dom,
+static int alloc_new_range(struct amd_iommu *iommu,
+                           struct dma_ops_domain *dma_dom,
                            bool populate, gfp_t gfp)
 {
         int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT;
+        int i;
 
         if (index >= APERTURE_MAX_RANGES)
                 return -ENOMEM;
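The new fetch_pte helper above is a plain three-level page-table walk: index the root table, stop if the entry is not present, otherwise descend into the next-level table, and finally return a pointer to the level-0 PTE. Below is a standalone sketch of the same walk under simplified assumptions: tables live in ordinary memory, and each entry stores the next table's aligned address with bit 0 as the present flag. The TOY_* macros stand in for the kernel's IOMMU_PTE_* macros and are not the real definitions.

#include <stddef.h>
#include <stdint.h>

#define TOY_PTE_PRESENT(pte)  ((pte) & 1ULL)  /* bit 0: entry valid */
#define TOY_PTE_PAGE(pte)     ((uint64_t *)(uintptr_t)((pte) & ~0xfffULL))

/* 9 index bits per level, 4K pages: a simplified 3-level layout. */
#define TOY_L2_INDEX(addr)    (((addr) >> 30) & 0x1ffULL)
#define TOY_L1_INDEX(addr)    (((addr) >> 21) & 0x1ffULL)
#define TOY_L0_INDEX(addr)    (((addr) >> 12) & 0x1ffULL)

/* Walk a 3-level table; return the L0 PTE pointer, or NULL if unmapped. */
static uint64_t *toy_fetch_pte(uint64_t *pt_root, unsigned long address)
{
        uint64_t *pte = &pt_root[TOY_L2_INDEX(address)];

        if (!TOY_PTE_PRESENT(*pte))
                return NULL;

        pte = TOY_PTE_PAGE(*pte);
        pte = &pte[TOY_L1_INDEX(address)];

        if (!TOY_PTE_PRESENT(*pte))
                return NULL;

        pte = TOY_PTE_PAGE(*pte);
        return &pte[TOY_L0_INDEX(address)];
}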
@@ -662,6 +693,33 @@ static int alloc_new_range(struct dma_ops_domain *dma_dom,
 
         dma_dom->aperture_size += APERTURE_RANGE_SIZE;
 
+        /* Intialize the exclusion range if necessary */
+        if (iommu->exclusion_start &&
+            iommu->exclusion_start >= dma_dom->aperture[index]->offset &&
+            iommu->exclusion_start < dma_dom->aperture_size) {
+                unsigned long startpage = iommu->exclusion_start >> PAGE_SHIFT;
+                int pages = iommu_num_pages(iommu->exclusion_start,
+                                            iommu->exclusion_length,
+                                            PAGE_SIZE);
+                dma_ops_reserve_addresses(dma_dom, startpage, pages);
+        }
+
+        /*
+         * Check for areas already mapped as present in the new aperture
+         * range and mark those pages as reserved in the allocator. Such
+         * mappings may already exist as a result of requested unity
+         * mappings for devices.
+         */
+        for (i = dma_dom->aperture[index]->offset;
+             i < dma_dom->aperture_size;
+             i += PAGE_SIZE) {
+                u64 *pte = fetch_pte(&dma_dom->domain, i);
+                if (!pte || !IOMMU_PTE_PRESENT(*pte))
+                        continue;
+
+                dma_ops_reserve_addresses(dma_dom, i << PAGE_SHIFT, 1);
+        }
+
         return 0;
 
 out_free:
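The hunk above is the heart of the patch: right after a range is added, the exclusion range reported by the IOMMU and every page that already has a present PTE (typically from a unity mapping set up at initialization) are marked reserved, so the allocator can never hand them out. One detail worth noting: i iterates over byte addresses while dma_ops_reserve_addresses takes a start page, so the page number would normally be i >> PAGE_SHIFT; the sketch below uses that form. It reuses the hypothetical toy_* helpers from the earlier sketches.

#define TOY_PAGE_SIZE (1UL << TOY_PAGE_SHIFT)

/*
 * After growing the aperture by one range, walk every page of the new
 * range and reserve those that already have a present PTE, so unity
 * mappings can never be handed out as dynamic DMA addresses.
 */
static void toy_reserve_present_pages(struct toy_aperture *ap,
                                      uint64_t *pt_root,
                                      unsigned long range_offset,
                                      unsigned long aperture_size)
{
        for (unsigned long addr = range_offset;
             addr < aperture_size;
             addr += TOY_PAGE_SIZE) {
                uint64_t *pte = toy_fetch_pte(pt_root, addr);

                if (!pte || !TOY_PTE_PRESENT(*pte))
                        continue;

                /* addr is a byte address; reserve its page number. */
                toy_reserve_pages(ap, addr >> TOY_PAGE_SHIFT, 1);
        }
}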
@@ -911,7 +969,7 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu,
         dma_dom->need_flush = false;
         dma_dom->target_dev = 0xffff;
 
-        if (alloc_new_range(dma_dom, true, GFP_KERNEL))
+        if (alloc_new_range(iommu, dma_dom, true, GFP_KERNEL))
                 goto free_dma_dom;
 
         /*
@@ -921,15 +979,6 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu,
         dma_dom->aperture[0]->bitmap[0] = 1;
         dma_dom->next_address = 0;
 
-        /* Intialize the exclusion range if necessary */
-        if (iommu->exclusion_start &&
-            iommu->exclusion_start < dma_dom->aperture_size) {
-                unsigned long startpage = iommu->exclusion_start >> PAGE_SHIFT;
-                int pages = iommu_num_pages(iommu->exclusion_start,
-                                            iommu->exclusion_length,
-                                            PAGE_SIZE);
-                dma_ops_reserve_addresses(dma_dom, startpage, pages);
-        }
-
         return dma_dom;
 
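The last two hunks explain why alloc_new_range gained the iommu parameter: the exclusion-range reservation previously ran once in dma_ops_domain_alloc and therefore covered only the initial aperture; moving it into alloc_new_range applies it to every range added as the aperture grows dynamically. A condensed sketch of the resulting flow, again with invented toy_* names rather than the kernel's, and with a simplified page count in place of iommu_num_pages():

/* Hypothetical condensed flow after this patch (not kernel code). */
struct toy_iommu {
        unsigned long exclusion_start;
        unsigned long exclusion_length;
};

static int toy_alloc_new_range(struct toy_iommu *iommu,
                               struct toy_aperture *ap,
                               uint64_t *pt_root,
                               unsigned long offset, unsigned long new_size)
{
        /* 1. Reserve the IOMMU exclusion range if it falls in this range. */
        if (iommu->exclusion_start &&
            iommu->exclusion_start >= offset &&
            iommu->exclusion_start < new_size) {
                unsigned long start = iommu->exclusion_start >> TOY_PAGE_SHIFT;
                unsigned int pages =
                        (iommu->exclusion_length + TOY_PAGE_SIZE - 1) >> TOY_PAGE_SHIFT;

                toy_reserve_pages(ap, start, pages);
        }

        /* 2. Reserve pages already mapped, e.g. by unity mappings. */
        toy_reserve_present_pages(ap, pt_root, offset, new_size);

        return 0;
}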