2008-10-16 05:01:03 +00:00
|
|
|
#ifndef __LINUX_SWIOTLB_H
|
|
|
|
#define __LINUX_SWIOTLB_H
|
|
|
|
|
2015-07-01 12:17:58 +00:00
|
|
|
#include <linux/dma-direction.h>
|
|
|
|
#include <linux/init.h>
|
2008-10-16 05:01:03 +00:00
|
|
|
#include <linux/types.h>
|
|
|
|
|
|
|
|
/*
 * Opaque types referenced by the prototypes below; only pointers are
 * used here, so full definitions are not required.
 */
struct device;
struct page;
struct scatterlist;
|
|
|
|
|
/*
 * Policy for using the software I/O TLB, selected on the kernel
 * command line via the "swiotlb=" parameter.
 */
enum swiotlb_force {
	SWIOTLB_NORMAL,		/* Default - depending on HW DMA mask etc. */
	SWIOTLB_FORCE,		/* swiotlb=force */
	SWIOTLB_NO_FORCE,	/* swiotlb=noforce */
};

/* Current policy; defined in lib/swiotlb.c (not visible here). */
extern enum swiotlb_force swiotlb_force;
|
2009-11-14 11:46:38 +00:00
|
|
|
|
/*
 * Maximum allowable number of contiguous slabs to map,
 * must be a power of 2.  What is the appropriate value ?
 * The complexity of {map,unmap}_single is linearly dependent on this value.
 */
#define IO_TLB_SEGSIZE	128

/*
 * log of the size of each IO TLB slab.  The number of slabs is command line
 * controllable.
 */
#define IO_TLB_SHIFT 11
|
|
|
|
|
/*
 * Boot-time setup of the bounce-buffer pool.
 *
 * NOTE(review): swiotlb_init() allocates from bootmem and does not panic
 * on allocation failure; mappings that need bounce memory fail later
 * instead -- confirm against lib/swiotlb.c.
 */
extern void swiotlb_init(int verbose);
int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose);
extern unsigned long swiotlb_nr_tbl(void);
unsigned long swiotlb_size_or_default(void);
extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs);
|
2008-10-16 05:01:03 +00:00
|
|
|
|
/*
 * Enumeration for sync targets
 */
enum dma_sync_target {
	SYNC_FOR_CPU = 0,
	SYNC_FOR_DEVICE = 1,
};
|
2012-10-15 17:19:39 +00:00
|
|
|
|
|
|
|
/* define the last possible byte of physical address space as a mapping error */
|
|
|
|
#define SWIOTLB_MAP_ERROR (~(phys_addr_t)0x0)
|
|
|
|
|
|
|
|
extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
|
|
|
|
dma_addr_t tbl_dma_addr,
|
|
|
|
phys_addr_t phys, size_t size,
|
2016-11-02 11:13:02 +00:00
|
|
|
enum dma_data_direction dir,
|
|
|
|
unsigned long attrs);
|
2010-05-28 15:37:10 +00:00
|
|
|
|
2012-10-15 17:19:44 +00:00
|
|
|
extern void swiotlb_tbl_unmap_single(struct device *hwdev,
|
|
|
|
phys_addr_t tlb_addr,
|
2016-11-02 11:13:02 +00:00
|
|
|
size_t size, enum dma_data_direction dir,
|
|
|
|
unsigned long attrs);
|
2010-05-28 15:37:10 +00:00
|
|
|
|
2012-10-15 17:19:49 +00:00
|
|
|
extern void swiotlb_tbl_sync_single(struct device *hwdev,
|
|
|
|
phys_addr_t tlb_addr,
|
2010-05-28 15:37:10 +00:00
|
|
|
size_t size, enum dma_data_direction dir,
|
|
|
|
enum dma_sync_target target);
|
|
|
|
|
|
|
|
/* Accessory functions. */
|
2008-10-16 05:01:03 +00:00
|
|
|
extern void
|
|
|
|
*swiotlb_alloc_coherent(struct device *hwdev, size_t size,
|
|
|
|
dma_addr_t *dma_handle, gfp_t flags);
|
|
|
|
|
|
|
|
extern void
|
|
|
|
swiotlb_free_coherent(struct device *hwdev, size_t size,
|
|
|
|
void *vaddr, dma_addr_t dma_handle);
|
|
|
|
|
2009-01-05 14:59:03 +00:00
|
|
|
extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
|
|
|
|
unsigned long offset, size_t size,
|
|
|
|
enum dma_data_direction dir,
|
2016-08-03 20:46:00 +00:00
|
|
|
unsigned long attrs);
|
2009-01-05 14:59:03 +00:00
|
|
|
extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
|
|
|
|
size_t size, enum dma_data_direction dir,
|
2016-08-03 20:46:00 +00:00
|
|
|
unsigned long attrs);
|
/* Scatter-gather variants of the mapping entry points above. */
extern int
swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
		     enum dma_data_direction dir,
		     unsigned long attrs);

extern void
swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
		       int nelems, enum dma_data_direction dir,
		       unsigned long attrs);
|
2008-10-16 05:01:03 +00:00
|
|
|
|
|
|
|
extern void
|
|
|
|
swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
|
2009-01-05 14:59:02 +00:00
|
|
|
size_t size, enum dma_data_direction dir);
|
2008-10-16 05:01:03 +00:00
|
|
|
|
|
|
|
extern void
|
|
|
|
swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
|
2009-01-05 14:59:02 +00:00
|
|
|
int nelems, enum dma_data_direction dir);
|
2008-10-16 05:01:03 +00:00
|
|
|
|
|
|
|
extern void
|
|
|
|
swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
|
2009-01-05 14:59:02 +00:00
|
|
|
size_t size, enum dma_data_direction dir);
|
2008-10-16 05:01:03 +00:00
|
|
|
|
|
|
|
extern void
|
|
|
|
swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
|
2009-01-05 14:59:02 +00:00
|
|
|
int nelems, enum dma_data_direction dir);
|
2008-10-16 05:01:03 +00:00
|
|
|
|
|
|
|
extern int
|
|
|
|
swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
|
|
|
|
|
|
|
|
extern int
|
|
|
|
swiotlb_dma_supported(struct device *hwdev, u64 mask);
|
|
|
|
|
#ifdef CONFIG_SWIOTLB
extern void __init swiotlb_free(void);
unsigned int swiotlb_max_segment(void);
#else
/* Stubs so callers need no #ifdefs when swiotlb is compiled out. */
static inline void swiotlb_free(void) { }
static inline unsigned int swiotlb_max_segment(void) { return 0; }
#endif
|
|
|
|
|
2009-11-10 10:46:19 +00:00
|
|
|
extern void swiotlb_print_info(void);
|
2014-06-04 23:06:50 +00:00
|
|
|
extern int is_swiotlb_buffer(phys_addr_t paddr);
|
2016-12-20 15:02:02 +00:00
|
|
|
extern void swiotlb_set_max_segment(unsigned int);
|
2014-06-04 23:06:50 +00:00
|
|
|
|
2008-10-16 05:01:03 +00:00
|
|
|
#endif /* __LINUX_SWIOTLB_H */
|