sh: use dma_direct_ops for the CONFIG_DMA_COHERENT case

This is a slight change in behavior as we avoid the detour through the
virtual mapping for the coherent allocator, but if this CPU really is
coherent that should be the right thing to do.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Yoshinori Sato <ysato@users.sourceforge.jp>
Author: Christoph Hellwig
Date:   2018-04-17 22:02:10 +02:00
commit a602915f5d (parent 47fcae0d2a)

4 changed files with 7 additions and 6 deletions
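For readers unfamiliar with the ops-table dispatch this commit touches, below is a toy userspace C model (not kernel code) of the selection implemented by the dma-mapping.h change: with CONFIG_DMA_NONCOHERENT the SH-specific nommu ops, which still perform explicit cache maintenance, are chosen; otherwise the generic dma_direct_ops are used. The struct layout, the sync callback body and main() are simplified stand-ins, not the real kernel definitions.

/*
 * Toy userspace model of the ops selection in this commit -- not kernel code.
 * The names mirror the diff (get_arch_dma_ops, nommu_dma_ops, dma_direct_ops);
 * the struct and callbacks are simplified stand-ins.
 * Build with e.g.:  cc -DCONFIG_DMA_NONCOHERENT model.c
 */
#include <stdio.h>
#include <stddef.h>

struct dma_map_ops {
	const char *name;
	/* only the callback relevant to this commit is modelled */
	void (*sync_single_for_device)(void *vaddr, size_t size);
};

static void nommu_sync_single_for_device(void *vaddr, size_t size)
{
	/* stands in for sh_sync_dma_for_device(): writeback/invalidate caches */
	printf("cache maintenance on %zu bytes at %p\n", size, vaddr);
}

static const struct dma_map_ops nommu_dma_ops = {
	.name			= "nommu_dma_ops",
	.sync_single_for_device	= nommu_sync_single_for_device,
};

static const struct dma_map_ops dma_direct_ops = {
	.name			= "dma_direct_ops",
	.sync_single_for_device	= NULL,	/* coherent CPU: nothing to do */
};

static const struct dma_map_ops *get_arch_dma_ops(void)
{
#ifdef CONFIG_DMA_NONCOHERENT
	return &nommu_dma_ops;		/* non-coherent: keep the SH ops */
#else
	return &dma_direct_ops;		/* coherent: generic direct mapping */
#endif
}

int main(void)
{
	char buf[64];
	const struct dma_map_ops *ops = get_arch_dma_ops();

	printf("using %s\n", ops->name);
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(buf, sizeof(buf));
	return 0;
}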

diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig

@@ -159,6 +159,7 @@ config SWAP_IO_SPACE
 	bool
 
 config DMA_COHERENT
+	select DMA_DIRECT_OPS
 	bool
 
 config DMA_NONCOHERENT

diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h

@@ -6,7 +6,11 @@ extern const struct dma_map_ops nommu_dma_ops;
 
 static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
+#ifdef CONFIG_DMA_NONCOHERENT
 	return &nommu_dma_ops;
+#else
+	return &dma_direct_ops;
+#endif
 }
 
 extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,

diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile

@@ -12,7 +12,7 @@ endif
 
 CFLAGS_REMOVE_return_address.o = -pg
 
-obj-y	:= debugtraps.o dma-nommu.o dumpstack.o		\
+obj-y	:= debugtraps.o dumpstack.o			\
 	   idle.o io.o irq.o irq_$(BITS).o kdebugfs.o	\
 	   machvec.o nmi_debug.o process.o		\
 	   process_$(BITS).o ptrace.o ptrace_$(BITS).o	\
@@ -45,7 +45,7 @@ obj-$(CONFIG_DUMP_CODE) += disassemble.o
 obj-$(CONFIG_HIBERNATION)	+= swsusp.o
 obj-$(CONFIG_DWARF_UNWINDER)	+= dwarf.o
 obj-$(CONFIG_PERF_EVENTS)	+= perf_event.o perf_callchain.o
+obj-$(CONFIG_DMA_NONCOHERENT)	+= dma-nommu.o
 obj-$(CONFIG_HAVE_HW_BREAKPOINT)	+= hw_breakpoint.o
 
 ccflags-y := -Werror

diff --git a/arch/sh/kernel/dma-nommu.c b/arch/sh/kernel/dma-nommu.c

@@ -51,7 +51,6 @@ static int nommu_map_sg(struct device *dev, struct scatterlist *sg,
 	return nents;
 }
 
-#ifdef CONFIG_DMA_NONCOHERENT
 static void nommu_sync_single_for_device(struct device *dev, dma_addr_t addr,
 					  size_t size, enum dma_data_direction dir)
 {
@@ -67,16 +66,13 @@ static void nommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 	for_each_sg(sg, s, nelems, i)
 		sh_sync_dma_for_device(sg_virt(s), s->length, dir);
 }
-#endif
 
 const struct dma_map_ops nommu_dma_ops = {
 	.alloc			= dma_generic_alloc_coherent,
 	.free			= dma_generic_free_coherent,
 	.map_page		= nommu_map_page,
 	.map_sg			= nommu_map_sg,
-#ifdef CONFIG_DMA_NONCOHERENT
 	.sync_single_for_device	= nommu_sync_single_for_device,
 	.sync_sg_for_device	= nommu_sync_sg_for_device,
-#endif
 };
 EXPORT_SYMBOL(nommu_dma_ops);