/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2016, Semihalf
* Author: Tomasz Nowicki <tn@semihalf.com>
*/
#ifndef __ACPI_IORT_H__
#define __ACPI_IORT_H__
#include <linux/acpi.h>
#include <linux/fwnode.h>
#include <linux/irqdomain.h>
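
/*
 * IORT SMMU nodes describe interrupts as 64-bit values, with the GSIV
 * in the lower 32 bits and the trigger flags in the upper 32 bits;
 * these helpers extract the two halves.
 */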
#define IORT_IRQ_MASK(irq) (irq & 0xffffffffULL)
#define IORT_IRQ_TRIGGER_MASK(irq) ((irq >> 32) & 0xffffffffULL)
/*
* PMCG model identifiers for use in smmu pmu driver. Please note
* that this is purely for the use of software and has nothing to
* do with hardware or with IORT specification.
*/
#define IORT_SMMU_V3_PMCG_GENERIC 0x00000000 /* Generic SMMUv3 PMCG */
#define IORT_SMMU_V3_PMCG_HISI_HIP08 0x00000001 /* HiSilicon HIP08 PMCG */
#define IORT_SMMU_V3_PMCG_HISI_HIP09 0x00000002 /* HiSilicon HIP09 PMCG */
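
/*
 * ITS MSI support: associate an IORT ITS translation ID with the fwnode
 * of its MSI IRQ domain, look that association up again, and retrieve
 * the ITS device ID for platform MSI capable devices
 * (implemented in drivers/acpi/arm64/iort.c).
 */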
int iort_register_domain_token(int trans_id, phys_addr_t base,
			       struct fwnode_handle *fw_node);
void iort_deregister_domain_token(int trans_id);
struct fwnode_handle *iort_find_domain_token(int trans_id);
int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id);
#ifdef CONFIG_ACPI_IORT
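/*
 * Map a device input ID to its ITS device ID and find the MSI domain
 * described by the IORT ID mappings for that device.
 */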
u32 iort_msi_map_id(struct device *dev, u32 id);
struct irq_domain *iort_get_device_domain(struct device *dev, u32 id,
					   enum irq_domain_bus_token bus_token);
void acpi_configure_pmsi_domain(struct device *dev);
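/*
 * Build and release the list of RMR (reserved memory range) entries
 * associated with a given IOMMU fwnode.
 */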
void iort_get_rmr_sids(struct fwnode_handle *iommu_fwnode,
		       struct list_head *head);
void iort_put_rmr_sids(struct fwnode_handle *iommu_fwnode,
		       struct list_head *head);
/* IOMMU interface */
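/* DMA addressing limits and IOMMU/device configuration from the IORT. */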
int iort_dma_get_ranges(struct device *dev, u64 *size);
int iort_iommu_configure_id(struct device *dev, const u32 *id_in);
void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head);
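/*
 * Early IORT scan used by arm64 to size ZONE_DMA: returns the highest
 * CPU physical address reachable by every device's DMA limit described
 * in the IORT (PHYS_ADDR_MAX if no limit is found).
 */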
phys_addr_t acpi_iort_dma_get_max_cpu_address(void);
#else
static inline u32 iort_msi_map_id(struct device *dev, u32 id)
{ return id; }
static inline struct irq_domain *iort_get_device_domain(
	struct device *dev, u32 id, enum irq_domain_bus_token bus_token)
{ return NULL; }
static inline void acpi_configure_pmsi_domain(struct device *dev) { }
static inline
void iort_get_rmr_sids(struct fwnode_handle *iommu_fwnode, struct list_head *head) { }
static inline
void iort_put_rmr_sids(struct fwnode_handle *iommu_fwnode, struct list_head *head) { }
/* IOMMU interface */
static inline int iort_dma_get_ranges(struct device *dev, u64 *size)
{ return -ENODEV; }
static inline int iort_iommu_configure_id(struct device *dev, const u32 *id_in)
{ return -ENODEV; }
static inline
void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head)
{ }
static inline phys_addr_t acpi_iort_dma_get_max_cpu_address(void)
{ return PHYS_ADDR_MAX; }
#endif
#endif /* __ACPI_IORT_H__ */