Merge branch 'iommu/iommu-priv' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux into arm/core

Joerg Roedel 2017-01-30 16:05:18 +01:00
commit ce273db0ff
12 changed files with 82 additions and 48 deletions

Documentation/DMA-attributes.txt

@@ -143,3 +143,13 @@ So, this provides a way for drivers to avoid those error messages on calls
where allocation failures are not a problem, and shouldn't bother the logs.
NOTE: At the moment DMA_ATTR_NO_WARN is only implemented on PowerPC.
DMA_ATTR_PRIVILEGED
------------------------------
Some advanced peripherals such as remote processors and GPUs perform
accesses to DMA buffers in both privileged "supervisor" and unprivileged
"user" modes. This attribute is used to indicate to the DMA-mapping
subsystem that the buffer is fully accessible at the elevated privilege
level (and ideally inaccessible or at least read-only at the
lesser-privileged levels).
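
For illustration only, a minimal sketch of how a driver might request such a
buffer through the DMA-mapping API, in the same way the pl330 change further
down in this commit does; the function and variable names here are
placeholders, not part of this commit:

#include <linux/dma-mapping.h>

/* Hypothetical driver state; names are illustrative only. */
static void *fw_cpu;      /* CPU virtual address of the buffer */
static dma_addr_t fw_dma; /* bus/IOVA address handed to the device */

static int example_alloc_priv_buffer(struct device *dev, size_t size)
{
	/* Request a buffer the device should reach only in privileged mode */
	fw_cpu = dma_alloc_attrs(dev, size, &fw_dma, GFP_KERNEL,
				 DMA_ATTR_PRIVILEGED);
	if (!fw_cpu)
		return -ENOMEM;
	return 0;
}

static void example_free_priv_buffer(struct device *dev, size_t size)
{
	dma_free_attrs(dev, size, fw_cpu, fw_dma, DMA_ATTR_PRIVILEGED);
}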

arch/arm/mm/dma-mapping.c

@@ -1171,6 +1171,25 @@ core_initcall(dma_debug_do_init);
#ifdef CONFIG_ARM_DMA_USE_IOMMU
static int __dma_info_to_prot(enum dma_data_direction dir, unsigned long attrs)
{
int prot = 0;
if (attrs & DMA_ATTR_PRIVILEGED)
prot |= IOMMU_PRIV;
switch (dir) {
case DMA_BIDIRECTIONAL:
return prot | IOMMU_READ | IOMMU_WRITE;
case DMA_TO_DEVICE:
return prot | IOMMU_READ;
case DMA_FROM_DEVICE:
return prot | IOMMU_WRITE;
default:
return prot;
}
}
/* IOMMU */
static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);
@@ -1394,7 +1413,8 @@ __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
* Create a mapping in device IO address space for specified pages
*/
static dma_addr_t
__iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
__iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
unsigned long attrs)
{
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
@@ -1419,7 +1439,7 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
len = (j - i) << PAGE_SHIFT;
ret = iommu_map(mapping->domain, iova, phys, len,
IOMMU_READ|IOMMU_WRITE);
__dma_info_to_prot(DMA_BIDIRECTIONAL, attrs));
if (ret < 0)
goto fail;
iova += len;
@@ -1476,7 +1496,8 @@ static struct page **__iommu_get_pages(void *cpu_addr, unsigned long attrs)
}
static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
dma_addr_t *handle, int coherent_flag)
dma_addr_t *handle, int coherent_flag,
unsigned long attrs)
{
struct page *page;
void *addr;
@@ -1488,7 +1509,7 @@ static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
if (!addr)
return NULL;
*handle = __iommu_create_mapping(dev, &page, size);
*handle = __iommu_create_mapping(dev, &page, size, attrs);
if (*handle == DMA_ERROR_CODE)
goto err_mapping;
@@ -1522,7 +1543,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
if (coherent_flag == COHERENT || !gfpflags_allow_blocking(gfp))
return __iommu_alloc_simple(dev, size, gfp, handle,
coherent_flag);
coherent_flag, attrs);
/*
* Following is a work-around (a.k.a. hack) to prevent pages
@@ -1537,7 +1558,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
if (!pages)
return NULL;
*handle = __iommu_create_mapping(dev, pages, size);
*handle = __iommu_create_mapping(dev, pages, size, attrs);
if (*handle == DMA_ERROR_CODE)
goto err_buffer;
@@ -1672,27 +1693,6 @@ static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
GFP_KERNEL);
}
static int __dma_direction_to_prot(enum dma_data_direction dir)
{
int prot;
switch (dir) {
case DMA_BIDIRECTIONAL:
prot = IOMMU_READ | IOMMU_WRITE;
break;
case DMA_TO_DEVICE:
prot = IOMMU_READ;
break;
case DMA_FROM_DEVICE:
prot = IOMMU_WRITE;
break;
default:
prot = 0;
}
return prot;
}
/*
* Map a part of the scatter-gather list into contiguous io address space
*/
@@ -1722,7 +1722,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
prot = __dma_direction_to_prot(dir);
prot = __dma_info_to_prot(dir, attrs);
ret = iommu_map(mapping->domain, iova, phys, len, prot);
if (ret < 0)
@@ -1930,7 +1930,7 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
if (dma_addr == DMA_ERROR_CODE)
return dma_addr;
prot = __dma_direction_to_prot(dir);
prot = __dma_info_to_prot(dir, attrs);
ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
if (ret < 0)
@@ -2036,7 +2036,7 @@ static dma_addr_t arm_iommu_map_resource(struct device *dev,
if (dma_addr == DMA_ERROR_CODE)
return dma_addr;
prot = __dma_direction_to_prot(dir) | IOMMU_MMIO;
prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO;
ret = iommu_map(mapping->domain, dma_addr, addr, len, prot);
if (ret < 0)

arch/arm64/mm/dma-mapping.c

@@ -558,7 +558,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
unsigned long attrs)
{
bool coherent = is_device_dma_coherent(dev);
int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent);
int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
size_t iosize = size;
void *addr;
@@ -712,7 +712,7 @@ static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
unsigned long attrs)
{
bool coherent = is_device_dma_coherent(dev);
int prot = dma_direction_to_prot(dir, coherent);
int prot = dma_info_to_prot(dir, coherent, attrs);
dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);
if (!iommu_dma_mapping_error(dev, dev_addr) &&
@@ -770,7 +770,7 @@ static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
__iommu_sync_sg_for_device(dev, sgl, nelems, dir);
return iommu_dma_map_sg(dev, sgl, nelems,
dma_direction_to_prot(dir, coherent));
dma_info_to_prot(dir, coherent, attrs));
}
static void __iommu_unmap_sg_attrs(struct device *dev,

drivers/dma/pl330.c

@@ -1867,9 +1867,10 @@ static int dmac_alloc_resources(struct pl330_dmac *pl330)
* Alloc MicroCode buffer for 'chans' Channel threads.
* A channel's buffer offset is (Channel_Id * MCODE_BUFF_PERCHAN)
*/
pl330->mcode_cpu = dma_alloc_coherent(pl330->ddma.dev,
pl330->mcode_cpu = dma_alloc_attrs(pl330->ddma.dev,
chans * pl330->mcbufsz,
&pl330->mcode_bus, GFP_KERNEL);
&pl330->mcode_bus, GFP_KERNEL,
DMA_ATTR_PRIVILEGED);
if (!pl330->mcode_cpu) {
dev_err(pl330->ddma.dev, "%s:%d Can't allocate memory!\n",
__func__, __LINE__);

drivers/iommu/arm-smmu-v3.c

@@ -269,9 +269,6 @@
#define STRTAB_STE_1_SHCFG_INCOMING 1UL
#define STRTAB_STE_1_SHCFG_SHIFT 44
#define STRTAB_STE_1_PRIVCFG_UNPRIV 2UL
#define STRTAB_STE_1_PRIVCFG_SHIFT 48
#define STRTAB_STE_2_S2VMID_SHIFT 0
#define STRTAB_STE_2_S2VMID_MASK 0xffffUL
#define STRTAB_STE_2_VTCR_SHIFT 32
@@ -1076,9 +1073,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
#ifdef CONFIG_PCI_ATS
STRTAB_STE_1_EATS_TRANS << STRTAB_STE_1_EATS_SHIFT |
#endif
STRTAB_STE_1_STRW_NSEL1 << STRTAB_STE_1_STRW_SHIFT |
STRTAB_STE_1_PRIVCFG_UNPRIV <<
STRTAB_STE_1_PRIVCFG_SHIFT);
STRTAB_STE_1_STRW_NSEL1 << STRTAB_STE_1_STRW_SHIFT);
if (smmu->features & ARM_SMMU_FEAT_STALLS)
dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);

drivers/iommu/arm-smmu.c

@@ -1217,7 +1217,7 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
continue;
s2cr[idx].type = type;
s2cr[idx].privcfg = S2CR_PRIVCFG_UNPRIV;
s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
s2cr[idx].cbndx = cbndx;
arm_smmu_write_s2cr(smmu, idx);
}

drivers/iommu/dma-iommu.c

@@ -245,16 +245,22 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
EXPORT_SYMBOL(iommu_dma_init_domain);
/**
* dma_direction_to_prot - Translate DMA API directions to IOMMU API page flags
* dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
* page flags.
* @dir: Direction of DMA transfer
* @coherent: Is the DMA master cache-coherent?
* @attrs: DMA attributes for the mapping
*
* Return: corresponding IOMMU API page protection flags
*/
int dma_direction_to_prot(enum dma_data_direction dir, bool coherent)
int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
unsigned long attrs)
{
int prot = coherent ? IOMMU_CACHE : 0;
if (attrs & DMA_ATTR_PRIVILEGED)
prot |= IOMMU_PRIV;
switch (dir) {
case DMA_BIDIRECTIONAL:
return prot | IOMMU_READ | IOMMU_WRITE;
@@ -697,7 +703,7 @@ dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
return __iommu_dma_map(dev, phys, size,
dma_direction_to_prot(dir, false) | IOMMU_MMIO);
dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
}
void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
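
Read as a worked example (an illustration, not part of the patch): the new
dma_info_to_prot() helper simply ORs together the translation of each input,
so a cache-coherent master doing a DMA_TO_DEVICE transfer with
DMA_ATTR_PRIVILEGED set would get:

	int prot = dma_info_to_prot(DMA_TO_DEVICE, true, DMA_ATTR_PRIVILEGED);
	/* prot == IOMMU_CACHE | IOMMU_PRIV | IOMMU_READ */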

drivers/iommu/io-pgtable-arm-v7s.c

@@ -265,7 +265,9 @@ static arm_v7s_iopte arm_v7s_prot_to_pte(int prot, int lvl,
if (!(prot & IOMMU_MMIO))
pte |= ARM_V7S_ATTR_TEX(1);
if (ap) {
pte |= ARM_V7S_PTE_AF | ARM_V7S_PTE_AP_UNPRIV;
pte |= ARM_V7S_PTE_AF;
if (!(prot & IOMMU_PRIV))
pte |= ARM_V7S_PTE_AP_UNPRIV;
if (!(prot & IOMMU_WRITE))
pte |= ARM_V7S_PTE_AP_RDONLY;
}
@@ -288,6 +290,8 @@ static int arm_v7s_pte_to_prot(arm_v7s_iopte pte, int lvl)
if (!(attr & ARM_V7S_PTE_AP_RDONLY))
prot |= IOMMU_WRITE;
if (!(attr & ARM_V7S_PTE_AP_UNPRIV))
prot |= IOMMU_PRIV;
if ((attr & (ARM_V7S_TEX_MASK << ARM_V7S_TEX_SHIFT)) == 0)
prot |= IOMMU_MMIO;
else if (pte & ARM_V7S_ATTR_C)

drivers/iommu/io-pgtable-arm.c

@@ -350,11 +350,14 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
if (data->iop.fmt == ARM_64_LPAE_S1 ||
data->iop.fmt == ARM_32_LPAE_S1) {
pte = ARM_LPAE_PTE_AP_UNPRIV | ARM_LPAE_PTE_nG;
pte = ARM_LPAE_PTE_nG;
if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
pte |= ARM_LPAE_PTE_AP_RDONLY;
if (!(prot & IOMMU_PRIV))
pte |= ARM_LPAE_PTE_AP_UNPRIV;
if (prot & IOMMU_MMIO)
pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
<< ARM_LPAE_PTE_ATTRINDX_SHIFT);

include/linux/dma-iommu.h

@@ -35,7 +35,8 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
u64 size, struct device *dev);
/* General helpers for DMA-API <-> IOMMU-API interaction */
int dma_direction_to_prot(enum dma_data_direction dir, bool coherent);
int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
unsigned long attrs);
/*
* These implement the bulk of the relevant DMA mapping callbacks, but require

include/linux/dma-mapping.h

@@ -62,6 +62,13 @@
*/
#define DMA_ATTR_NO_WARN (1UL << 8)
/*
* DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
* accessible at an elevated privilege level (and ideally inaccessible or
* at least read-only at lesser-privileged levels).
*/
#define DMA_ATTR_PRIVILEGED (1UL << 9)
/*
* A dma_addr_t can hold any valid DMA or bus address for the platform.
* It can be given to a device to use as a DMA source or target. A CPU cannot

include/linux/iommu.h

@@ -31,6 +31,13 @@
#define IOMMU_CACHE (1 << 2) /* DMA cache coherency */
#define IOMMU_NOEXEC (1 << 3)
#define IOMMU_MMIO (1 << 4) /* e.g. things like MSI doorbells */
/*
* This is to make the IOMMU API set up privileged
* mappings accessible by the master only at a higher
* privileged execution level and inaccessible at
* less privileged levels.
*/
#define IOMMU_PRIV (1 << 5)
struct iommu_ops;
struct iommu_group;
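
For completeness, a minimal sketch (placeholder function name, domain and
addresses; not taken from this commit) of how a direct user of the IOMMU API
would request a privileged mapping by OR-ing the new flag into the prot
argument of iommu_map():

#include <linux/iommu.h>

static int example_map_privileged(struct iommu_domain *domain,
				  unsigned long iova, phys_addr_t paddr,
				  size_t size)
{
	/* Read/write mapping intended for the master's privileged accesses */
	return iommu_map(domain, iova, paddr, size,
			 IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV);
}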