xen: branch for v5.19-rc2

-----BEGIN PGP SIGNATURE-----
 
 iHUEABYIAB0WIQRTLbB6QfY48x44uB6AXGG7T9hjvgUCYqLLcQAKCRCAXGG7T9hj
 vggVAP0Wjf81TxvIGrwtcn1q2LW7xyNp5TQk1T6GYx40sHXWlQD/SCKl5iorSGn+
 +g1eXYmOpnSIAcMX3B3T4Ra8uo3mnA8=
 =PnB/
 -----END PGP SIGNATURE-----

Merge tag 'for-linus-5.19a-rc2-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen updates from Juergen Gross:

 - a small cleanup removing "export" of an __init function

 - a small series adding a new infrastructure for platform flags

 - a series adding generic virtio support for Xen guests (frontend side)

* tag 'for-linus-5.19a-rc2-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen: unexport __init-annotated xen_xlate_map_ballooned_pages()
  arm/xen: Assign xen-grant DMA ops for xen-grant DMA devices
  xen/grant-dma-ops: Retrieve the ID of backend's domain for DT devices
  xen/grant-dma-iommu: Introduce stub IOMMU driver
  dt-bindings: Add xen,grant-dma IOMMU description for xen-grant DMA ops
  xen/virtio: Enable restricted memory access using Xen grant mappings
  xen/grant-dma-ops: Add option to restrict memory access under Xen
  xen/grants: support allocating consecutive grants
  arm/xen: Introduce xen_setup_dma_ops()
  virtio: replace arch_has_restricted_virtio_memory_access()
  kernel: add platform_has() infrastructure
Merged by Linus Torvalds on 2022-06-10 09:57:11 -07:00
commit f2ecc964b9
32 changed files with 832 additions and 87 deletions
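
For orientation before the individual diffs, a minimal editorial sketch (not part of the commit) of how the new platform-feature flag is meant to be consumed. platform_set(), platform_has() and PLATFORM_VIRTIO_RESTRICTED_MEM_ACCESS are introduced by this series; the example_* functions are hypothetical stand-ins for the real call sites (s390/x86 memory-encryption setup, Xen guest init, and virtio_features_ok()).

#include <linux/platform-feature.h>

/*
 * A platform that must restrict virtio memory access (e.g. a Xen guest
 * using grant mappings, or a memory-encrypted guest) sets the flag once
 * during early setup ... (hypothetical helper)
 */
static void __init example_platform_setup(void)
{
	platform_set(PLATFORM_VIRTIO_RESTRICTED_MEM_ACCESS);
}

/*
 * ... and the virtio core only tests the flag, replacing the removed
 * arch_has_restricted_virtio_memory_access() hook. (hypothetical helper)
 */
static bool example_restricted_virtio_access(void)
{
	return platform_has(PLATFORM_VIRTIO_RESTRICTED_MEM_ACCESS);
}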


@ -0,0 +1,39 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/iommu/xen,grant-dma.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Xen specific IOMMU for virtualized devices (e.g. virtio)

maintainers:
  - Stefano Stabellini <sstabellini@kernel.org>

description:
  The Xen IOMMU represents the Xen grant table interface. Grant mappings
  are to be used with devices connected to the Xen IOMMU using the "iommus"
  property, which also specifies the ID of the backend domain.
  The binding is required to restrict memory access using Xen grant mappings.

properties:
  compatible:
    const: xen,grant-dma

  '#iommu-cells':
    const: 1
    description:
      The single cell is the domid (domain ID) of the domain where the backend
      is running.

required:
  - compatible
  - "#iommu-cells"

additionalProperties: false

examples:
  - |
    iommu {
      compatible = "xen,grant-dma";
      #iommu-cells = <1>;
    };


@ -15831,6 +15831,14 @@ S: Maintained
F: Documentation/devicetree/bindings/iio/chemical/plantower,pms7003.yaml
F: drivers/iio/chemical/pms7003.c
PLATFORM FEATURE INFRASTRUCTURE
M: Juergen Gross <jgross@suse.com>
S: Maintained
F: arch/*/include/asm/platform-feature.h
F: include/asm-generic/platform-feature.h
F: include/linux/platform-feature.h
F: kernel/platform-feature.c
PLDMFW LIBRARY
M: Jacob Keller <jacob.e.keller@intel.com>
S: Maintained


@ -0,0 +1,2 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <xen/arm/xen-ops.h>


@ -33,7 +33,7 @@
#include <asm/dma-iommu.h>
#include <asm/mach/map.h>
#include <asm/system_info.h>
#include <xen/swiotlb-xen.h>
#include <asm/xen/xen-ops.h>
#include "dma.h"
#include "mm.h"
@ -2287,10 +2287,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
set_dma_ops(dev, dma_ops);
#ifdef CONFIG_XEN
if (xen_initial_domain())
dev->dma_ops = &xen_swiotlb_dma_ops;
#endif
xen_setup_dma_ops(dev);
dev->archdata.dma_ops_setup = true;
}


@ -443,6 +443,8 @@ static int __init xen_guest_init(void)
if (!xen_domain())
return 0;
xen_set_restricted_virtio_memory_access();
if (!acpi_disabled)
xen_acpi_guest_init();
else


@ -0,0 +1,2 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <xen/arm/xen-ops.h>


@ -9,9 +9,9 @@
#include <linux/dma-map-ops.h>
#include <linux/dma-iommu.h>
#include <xen/xen.h>
#include <xen/swiotlb-xen.h>
#include <asm/cacheflush.h>
#include <asm/xen/xen-ops.h>
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
@ -52,8 +52,5 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
if (iommu)
iommu_setup_dma_ops(dev, dma_base, dma_base + size - 1);
#ifdef CONFIG_XEN
if (xen_swiotlb_detect())
dev->dma_ops = &xen_swiotlb_dma_ops;
#endif
xen_setup_dma_ops(dev);
}


@ -769,7 +769,6 @@ menu "Virtualization"
config PROTECTED_VIRTUALIZATION_GUEST
def_bool n
prompt "Protected virtualization guest support"
select ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS
help
Select this option, if you want to be able to run this
kernel as a protected virtualization KVM guest.


@ -31,6 +31,7 @@
#include <linux/cma.h>
#include <linux/gfp.h>
#include <linux/dma-direct.h>
#include <linux/platform-feature.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/pgalloc.h>
@ -168,22 +169,14 @@ bool force_dma_unencrypted(struct device *dev)
return is_prot_virt_guest();
}
#ifdef CONFIG_ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS
int arch_has_restricted_virtio_memory_access(void)
{
return is_prot_virt_guest();
}
EXPORT_SYMBOL(arch_has_restricted_virtio_memory_access);
#endif
/* protected virtualization */
static void pv_init(void)
{
if (!is_prot_virt_guest())
return;
platform_set(PLATFORM_VIRTIO_RESTRICTED_MEM_ACCESS);
/* make sure bounce buffers are shared */
swiotlb_init(true, SWIOTLB_FORCE | SWIOTLB_VERBOSE);
swiotlb_update_mem_attributes();


@ -1542,7 +1542,6 @@ config X86_CPA_STATISTICS
config X86_MEM_ENCRYPT
select ARCH_HAS_FORCE_DMA_UNENCRYPTED
select DYNAMIC_PHYSICAL_MASK
select ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS
def_bool n
config AMD_MEM_ENCRYPT


@ -12,7 +12,6 @@
#include <linux/swiotlb.h>
#include <linux/cc_platform.h>
#include <linux/mem_encrypt.h>
#include <linux/virtio_config.h>
/* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
bool force_dma_unencrypted(struct device *dev)
@ -87,9 +86,3 @@ void __init mem_encrypt_init(void)
print_mem_encrypt_feature_info();
}
int arch_has_restricted_virtio_memory_access(void)
{
return cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT);
}
EXPORT_SYMBOL_GPL(arch_has_restricted_virtio_memory_access);


@ -21,6 +21,7 @@
#include <linux/dma-mapping.h>
#include <linux/virtio_config.h>
#include <linux/cc_platform.h>
#include <linux/platform-feature.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
@ -242,6 +243,9 @@ void __init sev_setup_arch(void)
size = total_mem * 6 / 100;
size = clamp_val(size, IO_TLB_DEFAULT_SIZE, SZ_1G);
swiotlb_adjust_size(size);
/* Set restricted memory access for virtio. */
platform_set(PLATFORM_VIRTIO_RESTRICTED_MEM_ACCESS);
}
static unsigned long pg_level_to_pfn(int level, pte_t *kpte, pgprot_t *ret_prot)


@ -195,6 +195,8 @@ static void __init xen_hvm_guest_init(void)
if (xen_pv_domain())
return;
xen_set_restricted_virtio_memory_access();
init_hvm_pv_info();
reserve_shared_info();


@ -109,6 +109,8 @@ static DEFINE_PER_CPU(struct tls_descs, shadow_tls_desc);
static void __init xen_pv_init_platform(void)
{
xen_set_restricted_virtio_memory_access();
populate_extra_pte(fix_to_virt(FIX_PARAVIRT_BOOTMAP));
set_fixmap(FIX_PARAVIRT_BOOTMAP, xen_start_info->shared_info);


@ -6,12 +6,6 @@ config VIRTIO
bus, such as CONFIG_VIRTIO_PCI, CONFIG_VIRTIO_MMIO, CONFIG_RPMSG
or CONFIG_S390_GUEST.
config ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS
bool
help
This option is selected if the architecture may need to enforce
VIRTIO_F_ACCESS_PLATFORM
config VIRTIO_PCI_LIB
tristate
help


@ -5,6 +5,7 @@
#include <linux/module.h>
#include <linux/idr.h>
#include <linux/of.h>
#include <linux/platform-feature.h>
#include <uapi/linux/virtio_ids.h>
/* Unique numbering for virtio devices. */
@ -170,12 +171,10 @@ EXPORT_SYMBOL_GPL(virtio_add_status);
static int virtio_features_ok(struct virtio_device *dev)
{
unsigned int status;
int ret;
might_sleep();
ret = arch_has_restricted_virtio_memory_access();
if (ret) {
if (platform_has(PLATFORM_VIRTIO_RESTRICTED_MEM_ACCESS)) {
if (!virtio_has_feature(dev, VIRTIO_F_VERSION_1)) {
dev_warn(&dev->dev,
"device must provide VIRTIO_F_VERSION_1\n");


@ -335,4 +335,24 @@ config XEN_UNPOPULATED_ALLOC
having to balloon out RAM regions in order to obtain physical memory
space to create such mappings.
config XEN_GRANT_DMA_IOMMU
bool
select IOMMU_API
config XEN_GRANT_DMA_OPS
bool
select DMA_OPS
config XEN_VIRTIO
bool "Xen virtio support"
depends on VIRTIO
select XEN_GRANT_DMA_OPS
select XEN_GRANT_DMA_IOMMU if OF
help
Enable virtio support for running as Xen guest. Depending on the
guest type this will require special support on the backend side
(qemu or kernel, depending on the virtio device types used).
If in doubt, say n.
endmenu


@ -39,3 +39,5 @@ xen-gntalloc-y := gntalloc.o
xen-privcmd-y := privcmd.o privcmd-buf.o
obj-$(CONFIG_XEN_FRONT_PGDIR_SHBUF) += xen-front-pgdir-shbuf.o
obj-$(CONFIG_XEN_UNPOPULATED_ALLOC) += unpopulated-alloc.o
obj-$(CONFIG_XEN_GRANT_DMA_OPS) += grant-dma-ops.o
obj-$(CONFIG_XEN_GRANT_DMA_IOMMU) += grant-dma-iommu.o


@ -0,0 +1,78 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Stub IOMMU driver which does nothing.
* The main purpose of it being present is to reuse generic IOMMU device tree
* bindings by Xen grant DMA-mapping layer.
*
* Copyright (C) 2022 EPAM Systems Inc.
*/
#include <linux/iommu.h>
#include <linux/of.h>
#include <linux/platform_device.h>
struct grant_dma_iommu_device {
struct device *dev;
struct iommu_device iommu;
};
/* Nothing is really needed here */
static const struct iommu_ops grant_dma_iommu_ops;
static const struct of_device_id grant_dma_iommu_of_match[] = {
{ .compatible = "xen,grant-dma" },
{ },
};
static int grant_dma_iommu_probe(struct platform_device *pdev)
{
struct grant_dma_iommu_device *mmu;
int ret;
mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL);
if (!mmu)
return -ENOMEM;
mmu->dev = &pdev->dev;
ret = iommu_device_register(&mmu->iommu, &grant_dma_iommu_ops, &pdev->dev);
if (ret)
return ret;
platform_set_drvdata(pdev, mmu);
return 0;
}
static int grant_dma_iommu_remove(struct platform_device *pdev)
{
struct grant_dma_iommu_device *mmu = platform_get_drvdata(pdev);
platform_set_drvdata(pdev, NULL);
iommu_device_unregister(&mmu->iommu);
return 0;
}
static struct platform_driver grant_dma_iommu_driver = {
.driver = {
.name = "grant-dma-iommu",
.of_match_table = grant_dma_iommu_of_match,
},
.probe = grant_dma_iommu_probe,
.remove = grant_dma_iommu_remove,
};
static int __init grant_dma_iommu_init(void)
{
struct device_node *iommu_np;
iommu_np = of_find_matching_node(NULL, grant_dma_iommu_of_match);
if (!iommu_np)
return 0;
of_node_put(iommu_np);
return platform_driver_register(&grant_dma_iommu_driver);
}
subsys_initcall(grant_dma_iommu_init);

drivers/xen/grant-dma-ops.c (new file, 346 lines)

@ -0,0 +1,346 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Xen grant DMA-mapping layer - contains special DMA-mapping routines
* for providing grant references as DMA addresses to be used by frontends
* (e.g. virtio) in Xen guests
*
* Copyright (c) 2021, Juergen Gross <jgross@suse.com>
*/
#include <linux/module.h>
#include <linux/dma-map-ops.h>
#include <linux/of.h>
#include <linux/pfn.h>
#include <linux/xarray.h>
#include <xen/xen.h>
#include <xen/xen-ops.h>
#include <xen/grant_table.h>
struct xen_grant_dma_data {
/* The ID of backend domain */
domid_t backend_domid;
/* Is device behaving sane? */
bool broken;
};
static DEFINE_XARRAY(xen_grant_dma_devices);
#define XEN_GRANT_DMA_ADDR_OFF (1ULL << 63)
static inline dma_addr_t grant_to_dma(grant_ref_t grant)
{
return XEN_GRANT_DMA_ADDR_OFF | ((dma_addr_t)grant << PAGE_SHIFT);
}
static inline grant_ref_t dma_to_grant(dma_addr_t dma)
{
return (grant_ref_t)((dma & ~XEN_GRANT_DMA_ADDR_OFF) >> PAGE_SHIFT);
}
static struct xen_grant_dma_data *find_xen_grant_dma_data(struct device *dev)
{
struct xen_grant_dma_data *data;
xa_lock(&xen_grant_dma_devices);
data = xa_load(&xen_grant_dma_devices, (unsigned long)dev);
xa_unlock(&xen_grant_dma_devices);
return data;
}
/*
* DMA ops for Xen frontends (e.g. virtio).
*
* Used to act as a kind of software IOMMU for Xen guests by using grants as
* DMA addresses.
* Such a DMA address is formed by using the grant reference as a frame
* number and setting the highest address bit (this bit is for the backend
* to be able to distinguish it from e.g. a mmio address).
*/
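/*
 * Editorial illustration (not part of the original source), assuming
 * PAGE_SHIFT == 12:
 *
 *   grant_to_dma(0x1234) = (1ULL << 63) | (0x1234ULL << 12)
 *                        = 0x8000000001234000
 *
 * dma_to_grant() recovers 0x1234 by masking off the top bit and shifting
 * back down by PAGE_SHIFT; the low PAGE_SHIFT bits remain free to carry
 * the intra-page offset added in xen_grant_dma_map_page().
 */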
static void *xen_grant_dma_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp,
unsigned long attrs)
{
struct xen_grant_dma_data *data;
unsigned int i, n_pages = PFN_UP(size);
unsigned long pfn;
grant_ref_t grant;
void *ret;
data = find_xen_grant_dma_data(dev);
if (!data)
return NULL;
if (unlikely(data->broken))
return NULL;
ret = alloc_pages_exact(n_pages * PAGE_SIZE, gfp);
if (!ret)
return NULL;
pfn = virt_to_pfn(ret);
if (gnttab_alloc_grant_reference_seq(n_pages, &grant)) {
free_pages_exact(ret, n_pages * PAGE_SIZE);
return NULL;
}
for (i = 0; i < n_pages; i++) {
gnttab_grant_foreign_access_ref(grant + i, data->backend_domid,
pfn_to_gfn(pfn + i), 0);
}
*dma_handle = grant_to_dma(grant);
return ret;
}
static void xen_grant_dma_free(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle, unsigned long attrs)
{
struct xen_grant_dma_data *data;
unsigned int i, n_pages = PFN_UP(size);
grant_ref_t grant;
data = find_xen_grant_dma_data(dev);
if (!data)
return;
if (unlikely(data->broken))
return;
grant = dma_to_grant(dma_handle);
for (i = 0; i < n_pages; i++) {
if (unlikely(!gnttab_end_foreign_access_ref(grant + i))) {
dev_alert(dev, "Grant still in use by backend domain, disabled for further use\n");
data->broken = true;
return;
}
}
gnttab_free_grant_reference_seq(grant, n_pages);
free_pages_exact(vaddr, n_pages * PAGE_SIZE);
}
static struct page *xen_grant_dma_alloc_pages(struct device *dev, size_t size,
dma_addr_t *dma_handle,
enum dma_data_direction dir,
gfp_t gfp)
{
void *vaddr;
vaddr = xen_grant_dma_alloc(dev, size, dma_handle, gfp, 0);
if (!vaddr)
return NULL;
return virt_to_page(vaddr);
}
static void xen_grant_dma_free_pages(struct device *dev, size_t size,
struct page *vaddr, dma_addr_t dma_handle,
enum dma_data_direction dir)
{
xen_grant_dma_free(dev, size, page_to_virt(vaddr), dma_handle, 0);
}
static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
unsigned long attrs)
{
struct xen_grant_dma_data *data;
unsigned int i, n_pages = PFN_UP(size);
grant_ref_t grant;
dma_addr_t dma_handle;
if (WARN_ON(dir == DMA_NONE))
return DMA_MAPPING_ERROR;
data = find_xen_grant_dma_data(dev);
if (!data)
return DMA_MAPPING_ERROR;
if (unlikely(data->broken))
return DMA_MAPPING_ERROR;
if (gnttab_alloc_grant_reference_seq(n_pages, &grant))
return DMA_MAPPING_ERROR;
for (i = 0; i < n_pages; i++) {
gnttab_grant_foreign_access_ref(grant + i, data->backend_domid,
xen_page_to_gfn(page) + i, dir == DMA_TO_DEVICE);
}
dma_handle = grant_to_dma(grant) + offset;
return dma_handle;
}
static void xen_grant_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
struct xen_grant_dma_data *data;
unsigned int i, n_pages = PFN_UP(size);
grant_ref_t grant;
if (WARN_ON(dir == DMA_NONE))
return;
data = find_xen_grant_dma_data(dev);
if (!data)
return;
if (unlikely(data->broken))
return;
grant = dma_to_grant(dma_handle);
for (i = 0; i < n_pages; i++) {
if (unlikely(!gnttab_end_foreign_access_ref(grant + i))) {
dev_alert(dev, "Grant still in use by backend domain, disabled for further use\n");
data->broken = true;
return;
}
}
gnttab_free_grant_reference_seq(grant, n_pages);
}
static void xen_grant_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
unsigned long attrs)
{
struct scatterlist *s;
unsigned int i;
if (WARN_ON(dir == DMA_NONE))
return;
for_each_sg(sg, s, nents, i)
xen_grant_dma_unmap_page(dev, s->dma_address, sg_dma_len(s), dir,
attrs);
}
static int xen_grant_dma_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
unsigned long attrs)
{
struct scatterlist *s;
unsigned int i;
if (WARN_ON(dir == DMA_NONE))
return -EINVAL;
for_each_sg(sg, s, nents, i) {
s->dma_address = xen_grant_dma_map_page(dev, sg_page(s), s->offset,
s->length, dir, attrs);
if (s->dma_address == DMA_MAPPING_ERROR)
goto out;
sg_dma_len(s) = s->length;
}
return nents;
out:
xen_grant_dma_unmap_sg(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
sg_dma_len(sg) = 0;
return -EIO;
}
static int xen_grant_dma_supported(struct device *dev, u64 mask)
{
return mask == DMA_BIT_MASK(64);
}
static const struct dma_map_ops xen_grant_dma_ops = {
.alloc = xen_grant_dma_alloc,
.free = xen_grant_dma_free,
.alloc_pages = xen_grant_dma_alloc_pages,
.free_pages = xen_grant_dma_free_pages,
.mmap = dma_common_mmap,
.get_sgtable = dma_common_get_sgtable,
.map_page = xen_grant_dma_map_page,
.unmap_page = xen_grant_dma_unmap_page,
.map_sg = xen_grant_dma_map_sg,
.unmap_sg = xen_grant_dma_unmap_sg,
.dma_supported = xen_grant_dma_supported,
};
bool xen_is_grant_dma_device(struct device *dev)
{
struct device_node *iommu_np;
bool has_iommu;
/* XXX Handle only DT devices for now */
if (!dev->of_node)
return false;
iommu_np = of_parse_phandle(dev->of_node, "iommus", 0);
has_iommu = iommu_np && of_device_is_compatible(iommu_np, "xen,grant-dma");
of_node_put(iommu_np);
return has_iommu;
}
void xen_grant_setup_dma_ops(struct device *dev)
{
struct xen_grant_dma_data *data;
struct of_phandle_args iommu_spec;
data = find_xen_grant_dma_data(dev);
if (data) {
dev_err(dev, "Xen grant DMA data is already created\n");
return;
}
/* XXX ACPI device unsupported for now */
if (!dev->of_node)
goto err;
if (of_parse_phandle_with_args(dev->of_node, "iommus", "#iommu-cells",
0, &iommu_spec)) {
dev_err(dev, "Cannot parse iommus property\n");
goto err;
}
if (!of_device_is_compatible(iommu_spec.np, "xen,grant-dma") ||
iommu_spec.args_count != 1) {
dev_err(dev, "Incompatible IOMMU node\n");
of_node_put(iommu_spec.np);
goto err;
}
of_node_put(iommu_spec.np);
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
goto err;
/*
* The endpoint ID here means the ID of the domain where the corresponding
* backend is running
*/
data->backend_domid = iommu_spec.args[0];
if (xa_err(xa_store(&xen_grant_dma_devices, (unsigned long)dev, data,
GFP_KERNEL))) {
dev_err(dev, "Cannot store Xen grant DMA data\n");
goto err;
}
dev->dma_ops = &xen_grant_dma_ops;
return;
err:
dev_err(dev, "Cannot set up Xen grant DMA ops, retain platform DMA ops\n");
}
MODULE_DESCRIPTION("Xen grant DMA-mapping layer");
MODULE_AUTHOR("Juergen Gross <jgross@suse.com>");
MODULE_LICENSE("GPL");


@ -33,6 +33,7 @@
#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
#include <linux/bitmap.h>
#include <linux/memblock.h>
#include <linux/sched.h>
#include <linux/mm.h>
@ -70,9 +71,32 @@
static grant_ref_t **gnttab_list;
static unsigned int nr_grant_frames;
/*
* Handling of free grants:
*
* Free grants are in a simple list anchored in gnttab_free_head. They are
* linked by grant ref, the last element contains GNTTAB_LIST_END. The number
* of free entries is stored in gnttab_free_count.
* Additionally there is a bitmap of free entries anchored in
* gnttab_free_bitmap. This is being used for simplifying allocation of
* multiple consecutive grants, which is needed e.g. for support of virtio.
* gnttab_last_free is used to add free entries of new frames at the end of
* the free list.
* gnttab_free_tail_ptr specifies the variable which references the start
* of consecutive free grants ending with gnttab_last_free. This pointer is
* updated in a rather defensive way, in order to avoid performance hits in
* hot paths.
* All those variables are protected by gnttab_list_lock.
*/
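/*
 * Editorial illustration (not part of the original comment): if e.g. grants
 * 32..36 are free, bits 32..36 are set in gnttab_free_bitmap and the list
 * runs 32 -> 33 -> 34 -> 35 -> 36 -> GNTTAB_LIST_END with gnttab_last_free
 * == 36. A request for three consecutive grants can then be served directly
 * from the tail run (*gnttab_free_tail_ptr .. gnttab_last_free); only when
 * that run is too short does get_free_seq() rescan the bitmap and rebuild
 * the free list in sorted order.
 */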
static int gnttab_free_count;
static grant_ref_t gnttab_free_head;
static unsigned int gnttab_size;
static grant_ref_t gnttab_free_head = GNTTAB_LIST_END;
static grant_ref_t gnttab_last_free = GNTTAB_LIST_END;
static grant_ref_t *gnttab_free_tail_ptr;
static unsigned long *gnttab_free_bitmap;
static DEFINE_SPINLOCK(gnttab_list_lock);
struct grant_frames xen_auto_xlat_grant_frames;
static unsigned int xen_gnttab_version;
module_param_named(version, xen_gnttab_version, uint, 0);
@ -168,16 +192,116 @@ static int get_free_entries(unsigned count)
ref = head = gnttab_free_head;
gnttab_free_count -= count;
while (count-- > 1)
head = gnttab_entry(head);
while (count--) {
bitmap_clear(gnttab_free_bitmap, head, 1);
if (gnttab_free_tail_ptr == __gnttab_entry(head))
gnttab_free_tail_ptr = &gnttab_free_head;
if (count)
head = gnttab_entry(head);
}
gnttab_free_head = gnttab_entry(head);
gnttab_entry(head) = GNTTAB_LIST_END;
if (!gnttab_free_count) {
gnttab_last_free = GNTTAB_LIST_END;
gnttab_free_tail_ptr = NULL;
}
spin_unlock_irqrestore(&gnttab_list_lock, flags);
return ref;
}
static int get_seq_entry_count(void)
{
if (gnttab_last_free == GNTTAB_LIST_END || !gnttab_free_tail_ptr ||
*gnttab_free_tail_ptr == GNTTAB_LIST_END)
return 0;
return gnttab_last_free - *gnttab_free_tail_ptr + 1;
}
/* Rebuilds the free grant list and tries to find count consecutive entries. */
static int get_free_seq(unsigned int count)
{
int ret = -ENOSPC;
unsigned int from, to;
grant_ref_t *last;
gnttab_free_tail_ptr = &gnttab_free_head;
last = &gnttab_free_head;
for (from = find_first_bit(gnttab_free_bitmap, gnttab_size);
from < gnttab_size;
from = find_next_bit(gnttab_free_bitmap, gnttab_size, to + 1)) {
to = find_next_zero_bit(gnttab_free_bitmap, gnttab_size,
from + 1);
if (ret < 0 && to - from >= count) {
ret = from;
bitmap_clear(gnttab_free_bitmap, ret, count);
from += count;
gnttab_free_count -= count;
if (from == to)
continue;
}
/*
* Recreate the free list in order to have it properly sorted.
* This is needed to make sure that the free tail has the maximum
* possible size.
*/
while (from < to) {
*last = from;
last = __gnttab_entry(from);
gnttab_last_free = from;
from++;
}
if (to < gnttab_size)
gnttab_free_tail_ptr = __gnttab_entry(to - 1);
}
*last = GNTTAB_LIST_END;
if (gnttab_last_free != gnttab_size - 1)
gnttab_free_tail_ptr = NULL;
return ret;
}
static int get_free_entries_seq(unsigned int count)
{
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&gnttab_list_lock, flags);
if (gnttab_free_count < count) {
ret = gnttab_expand(count - gnttab_free_count);
if (ret < 0)
goto out;
}
if (get_seq_entry_count() < count) {
ret = get_free_seq(count);
if (ret >= 0)
goto out;
ret = gnttab_expand(count - get_seq_entry_count());
if (ret < 0)
goto out;
}
ret = *gnttab_free_tail_ptr;
*gnttab_free_tail_ptr = gnttab_entry(ret + count - 1);
gnttab_free_count -= count;
if (!gnttab_free_count)
gnttab_free_tail_ptr = NULL;
bitmap_clear(gnttab_free_bitmap, ret, count);
out:
spin_unlock_irqrestore(&gnttab_list_lock, flags);
return ret;
}
static void do_free_callbacks(void)
{
struct gnttab_free_callback *callback, *next;
@ -204,21 +328,51 @@ static inline void check_free_callbacks(void)
do_free_callbacks();
}
static void put_free_entry_locked(grant_ref_t ref)
{
if (unlikely(ref < GNTTAB_NR_RESERVED_ENTRIES))
return;
gnttab_entry(ref) = gnttab_free_head;
gnttab_free_head = ref;
if (!gnttab_free_count)
gnttab_last_free = ref;
if (gnttab_free_tail_ptr == &gnttab_free_head)
gnttab_free_tail_ptr = __gnttab_entry(ref);
gnttab_free_count++;
bitmap_set(gnttab_free_bitmap, ref, 1);
}
static void put_free_entry(grant_ref_t ref)
{
unsigned long flags;
if (unlikely(ref < GNTTAB_NR_RESERVED_ENTRIES))
return;
spin_lock_irqsave(&gnttab_list_lock, flags);
gnttab_entry(ref) = gnttab_free_head;
gnttab_free_head = ref;
gnttab_free_count++;
put_free_entry_locked(ref);
check_free_callbacks();
spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
static void gnttab_set_free(unsigned int start, unsigned int n)
{
unsigned int i;
for (i = start; i < start + n - 1; i++)
gnttab_entry(i) = i + 1;
gnttab_entry(i) = GNTTAB_LIST_END;
if (!gnttab_free_count) {
gnttab_free_head = start;
gnttab_free_tail_ptr = &gnttab_free_head;
} else {
gnttab_entry(gnttab_last_free) = start;
}
gnttab_free_count += n;
gnttab_last_free = i;
bitmap_set(gnttab_free_bitmap, start, n);
}
/*
* Following applies to gnttab_update_entry_v1 and gnttab_update_entry_v2.
* Introducing a valid entry into the grant table:
@ -450,23 +604,31 @@ void gnttab_free_grant_references(grant_ref_t head)
{
grant_ref_t ref;
unsigned long flags;
int count = 1;
if (head == GNTTAB_LIST_END)
return;
spin_lock_irqsave(&gnttab_list_lock, flags);
ref = head;
while (gnttab_entry(ref) != GNTTAB_LIST_END) {
ref = gnttab_entry(ref);
count++;
while (head != GNTTAB_LIST_END) {
ref = gnttab_entry(head);
put_free_entry_locked(head);
head = ref;
}
gnttab_entry(ref) = gnttab_free_head;
gnttab_free_head = head;
gnttab_free_count += count;
check_free_callbacks();
spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_references);
void gnttab_free_grant_reference_seq(grant_ref_t head, unsigned int count)
{
unsigned long flags;
unsigned int i;
spin_lock_irqsave(&gnttab_list_lock, flags);
for (i = count; i > 0; i--)
put_free_entry_locked(head + i - 1);
check_free_callbacks();
spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_reference_seq);
int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
{
int h = get_free_entries(count);
@ -480,6 +642,24 @@ int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
}
EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references);
int gnttab_alloc_grant_reference_seq(unsigned int count, grant_ref_t *first)
{
int h;
if (count == 1)
h = get_free_entries(1);
else
h = get_free_entries_seq(count);
if (h < 0)
return -ENOSPC;
*first = h;
return 0;
}
EXPORT_SYMBOL_GPL(gnttab_alloc_grant_reference_seq);
int gnttab_empty_grant_references(const grant_ref_t *private_head)
{
return (*private_head == GNTTAB_LIST_END);
@ -572,16 +752,13 @@ static int grow_gnttab_list(unsigned int more_frames)
goto grow_nomem;
}
gnttab_set_free(gnttab_size, extra_entries);
for (i = grefs_per_frame * nr_grant_frames;
i < grefs_per_frame * new_nr_grant_frames - 1; i++)
gnttab_entry(i) = i + 1;
gnttab_entry(i) = gnttab_free_head;
gnttab_free_head = grefs_per_frame * nr_grant_frames;
gnttab_free_count += extra_entries;
if (!gnttab_free_tail_ptr)
gnttab_free_tail_ptr = __gnttab_entry(gnttab_size);
nr_grant_frames = new_nr_grant_frames;
gnttab_size += extra_entries;
check_free_callbacks();
@ -1424,20 +1601,20 @@ static int gnttab_expand(unsigned int req_entries)
int gnttab_init(void)
{
int i;
unsigned long max_nr_grant_frames;
unsigned long max_nr_grant_frames, max_nr_grefs;
unsigned int max_nr_glist_frames, nr_glist_frames;
unsigned int nr_init_grefs;
int ret;
gnttab_request_version();
max_nr_grant_frames = gnttab_max_grant_frames();
max_nr_grefs = max_nr_grant_frames *
gnttab_interface->grefs_per_grant_frame;
nr_grant_frames = 1;
/* Determine the maximum number of frames required for the
* grant reference free list on the current hypervisor.
*/
max_nr_glist_frames = (max_nr_grant_frames *
gnttab_interface->grefs_per_grant_frame / RPP);
max_nr_glist_frames = max_nr_grefs / RPP;
gnttab_list = kmalloc_array(max_nr_glist_frames,
sizeof(grant_ref_t *),
@ -1454,6 +1631,12 @@ int gnttab_init(void)
}
}
gnttab_free_bitmap = bitmap_zalloc(max_nr_grefs, GFP_KERNEL);
if (!gnttab_free_bitmap) {
ret = -ENOMEM;
goto ini_nomem;
}
ret = arch_gnttab_init(max_nr_grant_frames,
nr_status_frames(max_nr_grant_frames));
if (ret < 0)
@ -1464,15 +1647,10 @@ int gnttab_init(void)
goto ini_nomem;
}
nr_init_grefs = nr_grant_frames *
gnttab_interface->grefs_per_grant_frame;
gnttab_size = nr_grant_frames * gnttab_interface->grefs_per_grant_frame;
for (i = GNTTAB_NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
gnttab_entry(i) = i + 1;
gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END;
gnttab_free_count = nr_init_grefs - GNTTAB_NR_RESERVED_ENTRIES;
gnttab_free_head = GNTTAB_NR_RESERVED_ENTRIES;
gnttab_set_free(GNTTAB_NR_RESERVED_ENTRIES,
gnttab_size - GNTTAB_NR_RESERVED_ENTRIES);
printk("Grant table initialized\n");
return 0;
@ -1481,6 +1659,7 @@ int gnttab_init(void)
for (i--; i >= 0; i--)
free_page((unsigned long)gnttab_list[i]);
kfree(gnttab_list);
bitmap_free(gnttab_free_bitmap);
return ret;
}
EXPORT_SYMBOL_GPL(gnttab_init);


@ -261,7 +261,6 @@ int __init xen_xlate_map_ballooned_pages(xen_pfn_t **gfns, void **virt,
return 0;
}
EXPORT_SYMBOL_GPL(xen_xlate_map_ballooned_pages);
struct remap_pfn {
struct mm_struct *mm;


@ -44,6 +44,7 @@ mandatory-y += msi.h
mandatory-y += pci.h
mandatory-y += percpu.h
mandatory-y += pgalloc.h
mandatory-y += platform-feature.h
mandatory-y += preempt.h
mandatory-y += rwonce.h
mandatory-y += sections.h


@ -0,0 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_GENERIC_PLATFORM_FEATURE_H
#define _ASM_GENERIC_PLATFORM_FEATURE_H
/* Number of arch specific feature flags. */
#define PLATFORM_ARCH_FEAT_N 0
#endif /* _ASM_GENERIC_PLATFORM_FEATURE_H */


@ -0,0 +1,19 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _PLATFORM_FEATURE_H
#define _PLATFORM_FEATURE_H
#include <linux/bitops.h>
#include <asm/platform-feature.h>
/* The platform features are starting with the architecture specific ones. */
/* Used to enable platform specific DMA handling for virtio devices. */
#define PLATFORM_VIRTIO_RESTRICTED_MEM_ACCESS (0 + PLATFORM_ARCH_FEAT_N)
#define PLATFORM_FEAT_N (1 + PLATFORM_ARCH_FEAT_N)
void platform_set(unsigned int feature);
void platform_clear(unsigned int feature);
bool platform_has(unsigned int feature);
#endif /* _PLATFORM_FEATURE_H */
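
Because PLATFORM_VIRTIO_RESTRICTED_MEM_ACCESS is numbered relative to PLATFORM_ARCH_FEAT_N, an architecture can add its own flags simply by providing its own <asm/platform-feature.h>. No architecture does so in this series; the header below is a hypothetical editorial sketch of what that would look like.

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_EXAMPLE_PLATFORM_FEATURE_H
#define _ASM_EXAMPLE_PLATFORM_FEATURE_H

/* Hypothetical arch specific flags occupy the numbers below PLATFORM_ARCH_FEAT_N. */
#define PLATFORM_EXAMPLE_FEAT_FOO	0
#define PLATFORM_EXAMPLE_FEAT_BAR	1

/* Number of arch specific feature flags. */
#define PLATFORM_ARCH_FEAT_N		2

#endif /* _ASM_EXAMPLE_PLATFORM_FEATURE_H */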


@ -604,13 +604,4 @@ static inline void virtio_cwrite64(struct virtio_device *vdev,
_r; \
})
#ifdef CONFIG_ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS
int arch_has_restricted_virtio_memory_access(void);
#else
static inline int arch_has_restricted_virtio_memory_access(void)
{
return 0;
}
#endif /* CONFIG_ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS */
#endif /* _LINUX_VIRTIO_CONFIG_H */

include/xen/arm/xen-ops.h (new file, 18 lines)

@ -0,0 +1,18 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_ARM_XEN_OPS_H
#define _ASM_ARM_XEN_OPS_H
#include <xen/swiotlb-xen.h>
#include <xen/xen-ops.h>
static inline void xen_setup_dma_ops(struct device *dev)
{
#ifdef CONFIG_XEN
if (xen_is_grant_dma_device(dev))
xen_grant_setup_dma_ops(dev);
else if (xen_swiotlb_detect())
dev->dma_ops = &xen_swiotlb_dma_ops;
#endif
}
#endif /* _ASM_ARM_XEN_OPS_H */


@ -127,10 +127,14 @@ int gnttab_try_end_foreign_access(grant_ref_t ref);
*/
int gnttab_alloc_grant_references(u16 count, grant_ref_t *pprivate_head);
int gnttab_alloc_grant_reference_seq(unsigned int count, grant_ref_t *first);
void gnttab_free_grant_reference(grant_ref_t ref);
void gnttab_free_grant_references(grant_ref_t head);
void gnttab_free_grant_reference_seq(grant_ref_t head, unsigned int count);
int gnttab_empty_grant_references(const grant_ref_t *pprivate_head);
int gnttab_claim_grant_reference(grant_ref_t *pprivate_head);


@ -214,4 +214,17 @@ static inline void xen_preemptible_hcall_end(void) { }
#endif /* CONFIG_XEN_PV && !CONFIG_PREEMPTION */
#ifdef CONFIG_XEN_GRANT_DMA_OPS
void xen_grant_setup_dma_ops(struct device *dev);
bool xen_is_grant_dma_device(struct device *dev);
#else
static inline void xen_grant_setup_dma_ops(struct device *dev)
{
}
static inline bool xen_is_grant_dma_device(struct device *dev)
{
return false;
}
#endif /* CONFIG_XEN_GRANT_DMA_OPS */
#endif /* INCLUDE_XEN_OPS_H */


@ -52,6 +52,14 @@ bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
extern u64 xen_saved_max_mem_size;
#endif
#include <linux/platform-feature.h>
static inline void xen_set_restricted_virtio_memory_access(void)
{
if (IS_ENABLED(CONFIG_XEN_VIRTIO) && xen_domain())
platform_set(PLATFORM_VIRTIO_RESTRICTED_MEM_ACCESS);
}
#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages);
void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages);


@ -7,7 +7,7 @@ obj-y = fork.o exec_domain.o panic.o \
cpu.o exit.o softirq.o resource.o \
sysctl.o capability.o ptrace.o user.o \
signal.o sys.o umh.o workqueue.o pid.o task_work.o \
extable.o params.o \
extable.o params.o platform-feature.o \
kthread.o sys_ni.o nsproxy.o \
notifier.o ksysfs.o cred.o reboot.o \
async.o range.o smpboot.o ucount.o regset.o

kernel/platform-feature.c (new file, 27 lines)

@ -0,0 +1,27 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/export.h>
#include <linux/platform-feature.h>
#define PLATFORM_FEAT_ARRAY_SZ BITS_TO_LONGS(PLATFORM_FEAT_N)
static unsigned long __read_mostly platform_features[PLATFORM_FEAT_ARRAY_SZ];
void platform_set(unsigned int feature)
{
set_bit(feature, platform_features);
}
EXPORT_SYMBOL_GPL(platform_set);
void platform_clear(unsigned int feature)
{
clear_bit(feature, platform_features);
}
EXPORT_SYMBOL_GPL(platform_clear);
bool platform_has(unsigned int feature)
{
return test_bit(feature, platform_features);
}
EXPORT_SYMBOL_GPL(platform_has);