Merge branch 'for-next/iommu/arm-smmu' into for-next/iommu/core

Arm SMMU updates for 5.11, including support for the SMMU integrated
into the Adreno GPU as well as workarounds for the broken firmware
implementation on the Qualcomm DB845c development board.

* for-next/iommu/arm-smmu:
  iommu: arm-smmu-impl: Add a space before open parenthesis
  iommu: arm-smmu-impl: Use table to list QCOM implementations
  iommu/arm-smmu: Move non-strict mode to use io_pgtable_domain_attr
  iommu/arm-smmu: Add support for pagetable config domain attribute
  iommu/io-pgtable-arm: Add support to use system cache
  iommu/io-pgtable: Add a domain attribute for pagetable configuration
  dt-bindings: arm-smmu: Add compatible string for Adreno GPU SMMU
  iommu/arm-smmu: Add a way for implementations to influence SCTLR
  iommu/arm-smmu-qcom: Add implementation for the adreno GPU SMMU
  iommu/arm-smmu-v3: Assign boolean values to a bool variable
  iommu/arm-smmu: Use new devm_krealloc()
  iommu/arm-smmu-qcom: Implement S2CR quirk
  iommu/arm-smmu-qcom: Read back stream mappings
  iommu/arm-smmu: Allow implementation specific write_s2cr
commit 75c75adce4
Author: Will Deacon
Date:   2020-12-08 14:45:10 +00:00

10 changed files with 342 additions and 50 deletions
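
Editor's note: the io-pgtable and domain-attribute patches below form one path. A client stashes io-pgtable quirks in the domain via the new DOMAIN_ATTR_IO_PGTABLE_CFG attribute before attaching, arm_smmu_init_domain_context() folds them into the pagetable config, and io-pgtable-arm translates IO_PGTABLE_QUIRK_ARM_OUTER_WBWA into outer write-back/write-allocate walk attributes. A minimal caller-side sketch (illustrative only, not part of this series; the helper name is hypothetical):

	/* Hypothetical GPU-driver helper: request pagetable walks that
	 * allocate into the system cache. Must run before the domain is
	 * attached; the set_attr path below returns -EPERM afterwards. */
	static int example_enable_sys_cache(struct iommu_domain *domain)
	{
		struct io_pgtable_domain_attr pgtbl_cfg = {
			.quirks = IO_PGTABLE_QUIRK_ARM_OUTER_WBWA,
		};

		return iommu_domain_set_attr(domain, DOMAIN_ATTR_IO_PGTABLE_CFG,
					     &pgtbl_cfg);
	}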

diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.yaml b/Documentation/devicetree/bindings/iommu/arm,smmu.yaml

@@ -28,8 +28,6 @@ properties:
           - enum:
               - qcom,msm8996-smmu-v2
               - qcom,msm8998-smmu-v2
-              - qcom,sc7180-smmu-v2
-              - qcom,sdm845-smmu-v2
           - const: qcom,smmu-v2
       - description: Qcom SoCs implementing "arm,mmu-500"
@@ -40,6 +38,13 @@ properties:
               - qcom,sm8150-smmu-500
               - qcom,sm8250-smmu-500
           - const: arm,mmu-500
+      - description: Qcom Adreno GPUs implementing "arm,smmu-v2"
+        items:
+          - enum:
+              - qcom,sc7180-smmu-v2
+              - qcom,sdm845-smmu-v2
+          - const: qcom,adreno-smmu
+          - const: qcom,smmu-v2
       - description: Marvell SoCs implementing "arm,mmu-500"
         items:
           - const: marvell,ap806-smmu-500
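
For reference, a device node matching the new entry lists the SoC-specific string first, then the two generic strings, in the order fixed by the items list above (hypothetical node, properties trimmed):

	gpu_smmu: iommu@5040000 {
		compatible = "qcom,sdm845-smmu-v2", "qcom,adreno-smmu",
			     "qcom,smmu-v2";
		reg = <0x5040000 0x10000>;
		#iommu-cells = <1>;
		/* #global-interrupts, clocks, interrupts omitted */
	};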

diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c

@@ -33,7 +33,7 @@
 #include "arm-smmu-v3.h"
 
-static bool disable_bypass = 1;
+static bool disable_bypass = true;
 module_param(disable_bypass, bool, 0444);
 MODULE_PARM_DESC(disable_bypass,
 	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c b/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c

@@ -12,7 +12,7 @@
 static int arm_smmu_gr0_ns(int offset)
 {
-	switch(offset) {
+	switch (offset) {
 	case ARM_SMMU_GR0_sCR0:
 	case ARM_SMMU_GR0_sACR:
 	case ARM_SMMU_GR0_sGFSR:
@@ -91,15 +91,12 @@ static struct arm_smmu_device *cavium_smmu_impl_init(struct arm_smmu_device *smmu)
 {
 	struct cavium_smmu *cs;
 
-	cs = devm_kzalloc(smmu->dev, sizeof(*cs), GFP_KERNEL);
+	cs = devm_krealloc(smmu->dev, smmu, sizeof(*cs), GFP_KERNEL);
 	if (!cs)
 		return ERR_PTR(-ENOMEM);
 
-	cs->smmu = *smmu;
 	cs->smmu.impl = &cavium_impl;
 
-	devm_kfree(smmu->dev, smmu);
-
 	return &cs->smmu;
 }
@@ -217,11 +214,7 @@ struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu)
 	if (of_device_is_compatible(np, "nvidia,tegra194-smmu"))
 		return nvidia_smmu_impl_init(smmu);
 
-	if (of_device_is_compatible(np, "qcom,sdm845-smmu-500") ||
-	    of_device_is_compatible(np, "qcom,sc7180-smmu-500") ||
-	    of_device_is_compatible(np, "qcom,sm8150-smmu-500") ||
-	    of_device_is_compatible(np, "qcom,sm8250-smmu-500"))
-		return qcom_smmu_impl_init(smmu);
+	smmu = qcom_smmu_impl_init(smmu);
 
 	if (of_device_is_compatible(np, "marvell,ap806-smmu-500"))
 		smmu->impl = &mrvl_mmu500_impl;
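
The devm_kzalloc()/copy/devm_kfree() dance removed above (and in arm-smmu-nvidia.c below) is replaced by a single devm_krealloc(): because each subclass embeds struct arm_smmu_device as its first member and krealloc() preserves the existing contents, the enlarged allocation still begins with a valid arm_smmu_device, so container_of()-style downcasts keep working. A minimal sketch of the pattern, with hypothetical names:

	static const struct arm_smmu_impl my_impl;	/* hypothetical ops table */

	struct my_smmu {
		struct arm_smmu_device smmu;	/* must stay the first member */
		bool my_quirk;			/* implementation-private state */
	};

	static struct arm_smmu_device *my_impl_init(struct arm_smmu_device *smmu)
	{
		struct my_smmu *ms;

		/* Enlarge the devres allocation in place; the base fields
		 * survive, so no copy-and-free is needed. */
		ms = devm_krealloc(smmu->dev, smmu, sizeof(*ms), GFP_KERNEL);
		if (!ms)
			return ERR_PTR(-ENOMEM);

		ms->smmu.impl = &my_impl;
		return &ms->smmu;
	}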

diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c b/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c

@@ -242,18 +242,10 @@ struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu)
 	struct nvidia_smmu *nvidia_smmu;
 	struct platform_device *pdev = to_platform_device(dev);
 
-	nvidia_smmu = devm_kzalloc(dev, sizeof(*nvidia_smmu), GFP_KERNEL);
+	nvidia_smmu = devm_krealloc(dev, smmu, sizeof(*nvidia_smmu), GFP_KERNEL);
 	if (!nvidia_smmu)
 		return ERR_PTR(-ENOMEM);
 
-	/*
-	 * Copy the data from struct arm_smmu_device *smmu allocated in
-	 * arm-smmu.c. The smmu from struct nvidia_smmu replaces the smmu
-	 * pointer used in arm-smmu.c once this function returns.
-	 * This is necessary to derive nvidia_smmu from smmu pointer passed
-	 * through arm_smmu_impl function calls subsequently.
-	 */
-	nvidia_smmu->smmu = *smmu;
-
 	/* Instance 0 is ioremapped by arm-smmu.c. */
 	nvidia_smmu->bases[0] = smmu->base;
@@ -267,12 +259,5 @@ struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu)
 	nvidia_smmu->smmu.impl = &nvidia_smmu_impl;
 
-	/*
-	 * Free the struct arm_smmu_device *smmu allocated in arm-smmu.c.
-	 * Once this function returns, arm-smmu.c would use arm_smmu_device
-	 * allocated as part of struct nvidia_smmu.
-	 */
-	devm_kfree(dev, smmu);
-
 	return &nvidia_smmu->smmu;
 }

diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c

@@ -3,6 +3,7 @@
  * Copyright (c) 2019, The Linux Foundation. All rights reserved.
  */
 
+#include <linux/adreno-smmu-priv.h>
 #include <linux/of_device.h>
 #include <linux/qcom_scm.h>
@@ -10,8 +11,155 @@
 struct qcom_smmu {
 	struct arm_smmu_device smmu;
+	bool bypass_quirk;
+	u8 bypass_cbndx;
 };
 
+static struct qcom_smmu *to_qcom_smmu(struct arm_smmu_device *smmu)
+{
+	return container_of(smmu, struct qcom_smmu, smmu);
+}
+
+static void qcom_adreno_smmu_write_sctlr(struct arm_smmu_device *smmu, int idx,
+		u32 reg)
+{
+	/*
+	 * On the GPU device we want to process subsequent transactions after a
+	 * fault to keep the GPU from hanging
+	 */
+	reg |= ARM_SMMU_SCTLR_HUPCF;
+
+	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
+}
+
+#define QCOM_ADRENO_SMMU_GPU_SID 0
+
+static bool qcom_adreno_smmu_is_gpu_device(struct device *dev)
+{
+	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+	int i;
+
+	/*
+	 * The GPU will always use SID 0 so that is a handy way to uniquely
+	 * identify it and configure it for per-instance pagetables
+	 */
+	for (i = 0; i < fwspec->num_ids; i++) {
+		u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);
+
+		if (sid == QCOM_ADRENO_SMMU_GPU_SID)
+			return true;
+	}
+
+	return false;
+}
+
+static const struct io_pgtable_cfg *qcom_adreno_smmu_get_ttbr1_cfg(
+		const void *cookie)
+{
+	struct arm_smmu_domain *smmu_domain = (void *)cookie;
+	struct io_pgtable *pgtable =
+		io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops);
+	return &pgtable->cfg;
+}
+
+/*
+ * Local implementation to configure TTBR0 with the specified pagetable config.
+ * The GPU driver will call this to enable TTBR0 when per-instance pagetables
+ * are active
+ */
+static int qcom_adreno_smmu_set_ttbr0_cfg(const void *cookie,
+		const struct io_pgtable_cfg *pgtbl_cfg)
+{
+	struct arm_smmu_domain *smmu_domain = (void *)cookie;
+	struct io_pgtable *pgtable = io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops);
+	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+	struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];
+
+	/* The domain must have split pagetables already enabled */
+	if (cb->tcr[0] & ARM_SMMU_TCR_EPD1)
+		return -EINVAL;
+
+	/* If the pagetable config is NULL, disable TTBR0 */
+	if (!pgtbl_cfg) {
+		/* Do nothing if it is already disabled */
+		if ((cb->tcr[0] & ARM_SMMU_TCR_EPD0))
+			return -EINVAL;
+
+		/* Set TCR to the original configuration */
+		cb->tcr[0] = arm_smmu_lpae_tcr(&pgtable->cfg);
+		cb->ttbr[0] = FIELD_PREP(ARM_SMMU_TTBRn_ASID, cb->cfg->asid);
+	} else {
+		u32 tcr = cb->tcr[0];
+
+		/* Don't call this again if TTBR0 is already enabled */
+		if (!(cb->tcr[0] & ARM_SMMU_TCR_EPD0))
+			return -EINVAL;
+
+		tcr |= arm_smmu_lpae_tcr(pgtbl_cfg);
+		tcr &= ~(ARM_SMMU_TCR_EPD0 | ARM_SMMU_TCR_EPD1);
+
+		cb->tcr[0] = tcr;
+		cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
+		cb->ttbr[0] |= FIELD_PREP(ARM_SMMU_TTBRn_ASID, cb->cfg->asid);
+	}
+
+	arm_smmu_write_context_bank(smmu_domain->smmu, cb->cfg->cbndx);
+
+	return 0;
+}
+
+static int qcom_adreno_smmu_alloc_context_bank(struct arm_smmu_domain *smmu_domain,
+		struct arm_smmu_device *smmu,
+		struct device *dev, int start)
+{
+	int count;
+
+	/*
+	 * Assign context bank 0 to the GPU device so the GPU hardware can
+	 * switch pagetables
+	 */
+	if (qcom_adreno_smmu_is_gpu_device(dev)) {
+		start = 0;
+		count = 1;
+	} else {
+		start = 1;
+		count = smmu->num_context_banks;
+	}
+
+	return __arm_smmu_alloc_bitmap(smmu->context_map, start, count);
+}
+
+static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
+		struct io_pgtable_cfg *pgtbl_cfg, struct device *dev)
+{
+	struct adreno_smmu_priv *priv;
+
+	/* Only enable split pagetables for the GPU device (SID 0) */
+	if (!qcom_adreno_smmu_is_gpu_device(dev))
+		return 0;
+
+	/*
+	 * All targets that use the qcom,adreno-smmu compatible string *should*
+	 * be AARCH64 stage 1 but double check because the arm-smmu code assumes
+	 * that is the case when the TTBR1 quirk is enabled
+	 */
+	if ((smmu_domain->stage == ARM_SMMU_DOMAIN_S1) &&
+	    (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64))
+		pgtbl_cfg->quirks |= IO_PGTABLE_QUIRK_ARM_TTBR1;
+
+	/*
+	 * Initialize private interface with GPU:
+	 */
+	priv = dev_get_drvdata(dev);
+	priv->cookie = smmu_domain;
+	priv->get_ttbr1_cfg = qcom_adreno_smmu_get_ttbr1_cfg;
+	priv->set_ttbr0_cfg = qcom_adreno_smmu_set_ttbr0_cfg;
+
+	return 0;
+}
+
 static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
 	{ .compatible = "qcom,adreno" },
 	{ .compatible = "qcom,mdp4" },
@@ -23,6 +171,87 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
 	{ }
 };
 
+static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu)
+{
+	unsigned int last_s2cr = ARM_SMMU_GR0_S2CR(smmu->num_mapping_groups - 1);
+	struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
+	u32 reg;
+	u32 smr;
+	int i;
+
+	/*
+	 * With some firmware versions writes to S2CR of type FAULT are
+	 * ignored, and writing BYPASS will end up written as FAULT in the
+	 * register. Perform a write to S2CR to detect if this is the case and
+	 * if so reserve a context bank to emulate bypass streams.
+	 */
+	reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, S2CR_TYPE_BYPASS) |
+	      FIELD_PREP(ARM_SMMU_S2CR_CBNDX, 0xff) |
+	      FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, S2CR_PRIVCFG_DEFAULT);
+	arm_smmu_gr0_write(smmu, last_s2cr, reg);
+	reg = arm_smmu_gr0_read(smmu, last_s2cr);
+	if (FIELD_GET(ARM_SMMU_S2CR_TYPE, reg) != S2CR_TYPE_BYPASS) {
+		qsmmu->bypass_quirk = true;
+		qsmmu->bypass_cbndx = smmu->num_context_banks - 1;
+
+		set_bit(qsmmu->bypass_cbndx, smmu->context_map);
+
+		reg = FIELD_PREP(ARM_SMMU_CBAR_TYPE, CBAR_TYPE_S1_TRANS_S2_BYPASS);
+		arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(qsmmu->bypass_cbndx), reg);
+	}
+
+	for (i = 0; i < smmu->num_mapping_groups; i++) {
+		smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));
+
+		if (FIELD_GET(ARM_SMMU_SMR_VALID, smr)) {
+			smmu->smrs[i].id = FIELD_GET(ARM_SMMU_SMR_ID, smr);
+			smmu->smrs[i].mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr);
+			smmu->smrs[i].valid = true;
+
+			smmu->s2crs[i].type = S2CR_TYPE_BYPASS;
+			smmu->s2crs[i].privcfg = S2CR_PRIVCFG_DEFAULT;
+			smmu->s2crs[i].cbndx = 0xff;
+		}
+	}
+
+	return 0;
+}
+
+static void qcom_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
+{
+	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
+	struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
+	u32 cbndx = s2cr->cbndx;
+	u32 type = s2cr->type;
+	u32 reg;
+
+	if (qsmmu->bypass_quirk) {
+		if (type == S2CR_TYPE_BYPASS) {
+			/*
+			 * Firmware with quirky S2CR handling will substitute
+			 * BYPASS writes with FAULT, so point the stream to the
+			 * reserved context bank and ask for translation on the
+			 * stream
+			 */
+			type = S2CR_TYPE_TRANS;
+			cbndx = qsmmu->bypass_cbndx;
+		} else if (type == S2CR_TYPE_FAULT) {
+			/*
+			 * Firmware with quirky S2CR handling will ignore FAULT
+			 * writes, so trick it to write FAULT by asking for a
+			 * BYPASS.
+			 */
+			type = S2CR_TYPE_BYPASS;
+			cbndx = 0xff;
+		}
+	}
+
+	reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, type) |
+	      FIELD_PREP(ARM_SMMU_S2CR_CBNDX, cbndx) |
+	      FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg);
+	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg);
+}
+
 static int qcom_smmu_def_domain_type(struct device *dev)
 {
 	const struct of_device_id *match =
@@ -61,22 +290,51 @@ static int qcom_smmu500_reset(struct arm_smmu_device *smmu)
 }
 
 static const struct arm_smmu_impl qcom_smmu_impl = {
+	.cfg_probe = qcom_smmu_cfg_probe,
 	.def_domain_type = qcom_smmu_def_domain_type,
 	.reset = qcom_smmu500_reset,
+	.write_s2cr = qcom_smmu_write_s2cr,
 };
 
+static const struct arm_smmu_impl qcom_adreno_smmu_impl = {
+	.init_context = qcom_adreno_smmu_init_context,
+	.def_domain_type = qcom_smmu_def_domain_type,
+	.reset = qcom_smmu500_reset,
+	.alloc_context_bank = qcom_adreno_smmu_alloc_context_bank,
+	.write_sctlr = qcom_adreno_smmu_write_sctlr,
+};
+
+static struct arm_smmu_device *qcom_smmu_create(struct arm_smmu_device *smmu,
+		const struct arm_smmu_impl *impl)
+{
+	struct qcom_smmu *qsmmu;
+
+	qsmmu = devm_krealloc(smmu->dev, smmu, sizeof(*qsmmu), GFP_KERNEL);
+	if (!qsmmu)
+		return ERR_PTR(-ENOMEM);
+
+	qsmmu->smmu.impl = impl;
+
+	return &qsmmu->smmu;
+}
+
+static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[] = {
+	{ .compatible = "qcom,sc7180-smmu-500" },
+	{ .compatible = "qcom,sdm845-smmu-500" },
+	{ .compatible = "qcom,sm8150-smmu-500" },
+	{ .compatible = "qcom,sm8250-smmu-500" },
+	{ }
+};
+
 struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu)
 {
-	struct qcom_smmu *qsmmu;
+	const struct device_node *np = smmu->dev->of_node;
 
-	qsmmu = devm_kzalloc(smmu->dev, sizeof(*qsmmu), GFP_KERNEL);
-	if (!qsmmu)
-		return ERR_PTR(-ENOMEM);
+	if (of_match_node(qcom_smmu_impl_of_match, np))
+		return qcom_smmu_create(smmu, &qcom_smmu_impl);
 
-	qsmmu->smmu = *smmu;
+	if (of_device_is_compatible(np, "qcom,adreno-smmu"))
+		return qcom_smmu_create(smmu, &qcom_adreno_smmu_impl);
 
-	qsmmu->smmu.impl = &qcom_smmu_impl;
-	devm_kfree(smmu->dev, smmu);
-	return &qsmmu->smmu;
+	return smmu;
 }
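
The adreno_smmu_priv callbacks wired up in qcom_adreno_smmu_init_context() above are driven from the GPU-driver side (the drm/msm consumer lands separately). A hedged sketch of the intended flow, with illustrative names:

	/* Clone the TTBR1 config the SMMU reports, retarget it at TTBR0
	 * with a per-instance pagetable base, and install it; passing
	 * NULL to set_ttbr0_cfg() later disables TTBR0 again. */
	static int example_install_pgtable(struct adreno_smmu_priv *priv, u64 ttbr0)
	{
		struct io_pgtable_cfg cfg = *priv->get_ttbr1_cfg(priv->cookie);

		cfg.quirks &= ~IO_PGTABLE_QUIRK_ARM_TTBR1;
		cfg.arm_lpae_s1_cfg.ttbr = ttbr0;

		return priv->set_ttbr0_cfg(priv->cookie, &cfg);
	}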

diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c

@@ -617,7 +617,10 @@ void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
 	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
 		reg |= ARM_SMMU_SCTLR_E;
 
-	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
+	if (smmu->impl && smmu->impl->write_sctlr)
+		smmu->impl->write_sctlr(smmu, idx, reg);
+	else
+		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
 }
 
 static int arm_smmu_alloc_context_bank(struct arm_smmu_domain *smmu_domain,
@@ -783,8 +786,8 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 		goto out_clear_smmu;
 	}
 
-	if (smmu_domain->non_strict)
-		pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
+	if (smmu_domain->pgtbl_cfg.quirks)
+		pgtbl_cfg.quirks |= smmu_domain->pgtbl_cfg.quirks;
 
 	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
 	if (!pgtbl_ops) {
@@ -929,9 +932,16 @@ static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
 static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
 {
 	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
-	u32 reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, s2cr->type) |
-		  FIELD_PREP(ARM_SMMU_S2CR_CBNDX, s2cr->cbndx) |
-		  FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg);
+	u32 reg;
+
+	if (smmu->impl && smmu->impl->write_s2cr) {
+		smmu->impl->write_s2cr(smmu, idx);
+		return;
+	}
+
+	reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, s2cr->type) |
+	      FIELD_PREP(ARM_SMMU_S2CR_CBNDX, s2cr->cbndx) |
+	      FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg);
 
 	if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
 	    smmu->smrs[idx].valid)
@@ -1501,15 +1511,24 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
 		case DOMAIN_ATTR_NESTING:
 			*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
 			return 0;
+		case DOMAIN_ATTR_IO_PGTABLE_CFG: {
+			struct io_pgtable_domain_attr *pgtbl_cfg = data;
+			*pgtbl_cfg = smmu_domain->pgtbl_cfg;
+
+			return 0;
+		}
 		default:
 			return -ENODEV;
 		}
 		break;
 	case IOMMU_DOMAIN_DMA:
 		switch (attr) {
-		case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
-			*(int *)data = smmu_domain->non_strict;
+		case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE: {
+			bool non_strict = smmu_domain->pgtbl_cfg.quirks &
+					  IO_PGTABLE_QUIRK_NON_STRICT;
+			*(int *)data = non_strict;
 			return 0;
+		}
 		default:
 			return -ENODEV;
 		}
@@ -1541,6 +1560,17 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
 			else
 				smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
 			break;
+		case DOMAIN_ATTR_IO_PGTABLE_CFG: {
+			struct io_pgtable_domain_attr *pgtbl_cfg = data;
+
+			if (smmu_domain->smmu) {
+				ret = -EPERM;
+				goto out_unlock;
+			}
+
+			smmu_domain->pgtbl_cfg = *pgtbl_cfg;
+			break;
+		}
 		default:
 			ret = -ENODEV;
 		}
@@ -1548,7 +1578,10 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
 	case IOMMU_DOMAIN_DMA:
 		switch (attr) {
 		case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
-			smmu_domain->non_strict = *(int *)data;
+			if (*(int *)data)
+				smmu_domain->pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
+			else
+				smmu_domain->pgtbl_cfg.quirks &= ~IO_PGTABLE_QUIRK_NON_STRICT;
 			break;
 		default:
 			ret = -ENODEV;

diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.h b/drivers/iommu/arm/arm-smmu/arm-smmu.h

@@ -144,6 +144,7 @@ enum arm_smmu_cbar_type {
 #define ARM_SMMU_CB_SCTLR		0x0
 #define ARM_SMMU_SCTLR_S1_ASIDPNE	BIT(12)
 #define ARM_SMMU_SCTLR_CFCFG		BIT(7)
+#define ARM_SMMU_SCTLR_HUPCF		BIT(8)
 #define ARM_SMMU_SCTLR_CFIE		BIT(6)
 #define ARM_SMMU_SCTLR_CFRE		BIT(5)
 #define ARM_SMMU_SCTLR_E		BIT(4)
@@ -363,10 +364,10 @@ enum arm_smmu_domain_stage {
 struct arm_smmu_domain {
 	struct arm_smmu_device		*smmu;
 	struct io_pgtable_ops		*pgtbl_ops;
+	struct io_pgtable_domain_attr	pgtbl_cfg;
 	const struct iommu_flush_ops	*flush_ops;
 	struct arm_smmu_cfg		cfg;
 	enum arm_smmu_domain_stage	stage;
-	bool				non_strict;
 	struct mutex			init_mutex; /* Protects smmu pointer */
 	spinlock_t			cb_lock; /* Serialises ATS1* ops and TLB syncs */
 	struct iommu_domain		domain;
@@ -436,6 +437,8 @@ struct arm_smmu_impl {
 	int (*alloc_context_bank)(struct arm_smmu_domain *smmu_domain,
 				  struct arm_smmu_device *smmu,
 				  struct device *dev, int start);
+	void (*write_s2cr)(struct arm_smmu_device *smmu, int idx);
+	void (*write_sctlr)(struct arm_smmu_device *smmu, int idx, u32 reg);
 };
 
 #define INVALID_SMENDX			-1

diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c

@@ -761,7 +761,8 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
 	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
 			    IO_PGTABLE_QUIRK_NON_STRICT |
-			    IO_PGTABLE_QUIRK_ARM_TTBR1))
+			    IO_PGTABLE_QUIRK_ARM_TTBR1 |
+			    IO_PGTABLE_QUIRK_ARM_OUTER_WBWA))
 		return NULL;
 
 	data = arm_lpae_alloc_pgtable(cfg);
@@ -773,10 +774,15 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
 		tcr->sh = ARM_LPAE_TCR_SH_IS;
 		tcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
 		tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
+		if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_OUTER_WBWA)
+			goto out_free_data;
 	} else {
 		tcr->sh = ARM_LPAE_TCR_SH_OS;
 		tcr->irgn = ARM_LPAE_TCR_RGN_NC;
-		tcr->orgn = ARM_LPAE_TCR_RGN_NC;
+		if (!(cfg->quirks & IO_PGTABLE_QUIRK_ARM_OUTER_WBWA))
+			tcr->orgn = ARM_LPAE_TCR_RGN_NC;
+		else
+			tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
 	}
 
 	tg1 = cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1;
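
In short, the new quirk only changes the non-coherent walk case (outer attributes become write-back/write-allocate while inner attributes stay non-cacheable); requesting it together with a coherent walker fails the allocation. An illustrative allocation, where the flush ops, device and cookie are hypothetical placeholders:

	struct io_pgtable_cfg cfg = {
		.quirks		= IO_PGTABLE_QUIRK_ARM_OUTER_WBWA,
		.coherent_walk	= false,	/* quirk is rejected when true */
		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
		.ias		= 48,
		.oas		= 48,
		.tlb		= &example_flush_ops,
		.iommu_dev	= dev,
	};
	struct io_pgtable_ops *ops;

	/* irgn stays non-cacheable, orgn becomes write-back/write-allocate */
	ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, cookie);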

diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h

@@ -86,6 +86,9 @@ struct io_pgtable_cfg {
  *
  * IO_PGTABLE_QUIRK_ARM_TTBR1: (ARM LPAE format) Configure the table
  *	for use in the upper half of a split address space.
+ *
+ * IO_PGTABLE_QUIRK_ARM_OUTER_WBWA: Override the outer-cacheability
+ *	attributes set in the TCR for a non-coherent page-table walker.
  */
 #define IO_PGTABLE_QUIRK_ARM_NS		BIT(0)
 #define IO_PGTABLE_QUIRK_NO_PERMS	BIT(1)
@@ -93,6 +96,7 @@ struct io_pgtable_cfg {
 #define IO_PGTABLE_QUIRK_ARM_MTK_EXT	BIT(3)
 #define IO_PGTABLE_QUIRK_NON_STRICT	BIT(4)
 #define IO_PGTABLE_QUIRK_ARM_TTBR1	BIT(5)
+#define IO_PGTABLE_QUIRK_ARM_OUTER_WBWA	BIT(6)
 	unsigned long quirks;
 	unsigned long pgsize_bitmap;
 	unsigned int ias;
@@ -208,6 +212,10 @@ struct io_pgtable {
 #define io_pgtable_ops_to_pgtable(x) container_of((x), struct io_pgtable, ops)
 
+struct io_pgtable_domain_attr {
+	unsigned long quirks;
+};
+
 static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop)
 {
 	iop->cfg.tlb->tlb_flush_all(iop->cookie);

diff --git a/include/linux/iommu.h b/include/linux/iommu.h

@@ -118,6 +118,7 @@ enum iommu_attr {
 	DOMAIN_ATTR_FSL_PAMUV1,
 	DOMAIN_ATTR_NESTING,	/* two stages of translation */
 	DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
+	DOMAIN_ATTR_IO_PGTABLE_CFG,
 	DOMAIN_ATTR_MAX,
 };