#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>

#include <asm/cacheflush.h>

/*
 * Default no-op cache maintenance stubs for architectures that do not
 * need to flush anonymous pages or the kernel D-cache by hand.
 */
#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}

static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}

static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif

#include <asm/kmap_types.h>

#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>

/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
extern unsigned long totalhigh_pages;

void kmap_flush_unused(void);

#else /* CONFIG_HIGHMEM */

static inline unsigned int nr_free_highpages(void) { return 0; }

#define totalhigh_pages 0UL

#ifndef ARCH_HAS_KMAP
static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

static inline void kunmap(struct page *page)
{
}

static inline void *__kmap_atomic(struct page *page)
{
	pagefault_disable();
	return page_address(page);
}
#define kmap_atomic_prot(page, prot)	__kmap_atomic(page)

static inline void __kunmap_atomic(void *addr)
{
	pagefault_enable();
}

#define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))
#define kmap_atomic_to_page(ptr)	virt_to_page(ptr)

#define kmap_flush_unused()	do {} while(0)
#endif

#endif /* CONFIG_HIGHMEM */
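
/*
 * Usage sketch (illustrative): kmap() may sleep and is only valid in
 * process context, while kmap_atomic() disables pagefaults and must be
 * paired with kunmap_atomic() without sleeping in between:
 *
 *	char *vaddr = kmap(page);		(may sleep)
 *	...
 *	kunmap(page);
 *
 *	char *vaddr = kmap_atomic(page);	(never sleeps)
 *	...
 *	kunmap_atomic(vaddr);			(pass the address, not the page)
 */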

#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)

DECLARE_PER_CPU(int, __kmap_atomic_idx);

/*
 * Per-CPU stack of atomic kmap slots: push to claim the next free slot,
 * pop when the mapping is torn down.
 */
static inline int kmap_atomic_idx_push(void)
{
	int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;

#ifdef CONFIG_DEBUG_HIGHMEM
	WARN_ON_ONCE(in_irq() && !irqs_disabled());
	BUG_ON(idx > KM_TYPE_NR);
#endif
	return idx;
}

static inline int kmap_atomic_idx(void)
{
	return __this_cpu_read(__kmap_atomic_idx) - 1;
}

static inline void kmap_atomic_idx_pop(void)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	int idx = __this_cpu_dec_return(__kmap_atomic_idx);

	BUG_ON(idx < 0);
#else
	__this_cpu_dec(__kmap_atomic_idx);
#endif
}

#endif
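
/*
 * Sketch of how an architecture's atomic kmap typically consumes these
 * helpers (modelled loosely on x86_32; per-arch details differ):
 *
 *	int type = kmap_atomic_idx_push();
 *	int idx  = type + KM_TYPE_NR * smp_processor_id();
 *	unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 *	set_pte(kmap_pte - idx, mk_pte(page, prot));
 *	...
 *	kmap_atomic_idx_pop();
 */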

/*
 * Make both kmap_atomic(page, idx) and kmap_atomic(page) work during the
 * transition away from explicit kmap slot indices.
 */
#define kmap_atomic(page, args...) __kmap_atomic(page)

/*
 * Prevent people from trying to call kunmap_atomic() as if it were kunmap():
 * kunmap_atomic() should get the return value of kmap_atomic(), not the page.
 */
#define kunmap_atomic(addr, args...)				\
do {								\
	BUILD_BUG_ON(__same_type((addr), struct page *));	\
	__kunmap_atomic(addr);					\
} while (0)
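
/*
 * Illustrative example of the misuse the BUILD_BUG_ON above rejects:
 *
 *	void *vaddr = kmap_atomic(page);
 *	...
 *	kunmap_atomic(vaddr);	correct: pass the returned address
 *	kunmap_atomic(page);	build error: a struct page * is not an
 *				address returned by kmap_atomic()
 */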

/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_atomic(page, KM_USER0);
	clear_user_page(addr, vaddr, page);
	kunmap_atomic(addr, KM_USER0);
}
#endif

#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/**
 * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
 * @movableflags: The GFP flags related to the page's future ability to move, such as __GFP_MOVABLE
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA, but the caller is expected
 * to specify via movableflags whether the page will be movable in the
 * future or not.
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing its own
 * implementation.
 */
static inline struct page *
__alloc_zeroed_user_highpage(gfp_t movableflags,
			struct vm_area_struct *vma,
			unsigned long vaddr)
{
	struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
			vma, vaddr);

	if (page)
		clear_user_highpage(page, vaddr);

	return page;
}
#endif

/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or be reclaimed.
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
					unsigned long vaddr)
{
	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
}
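
/*
 * Typical use (sketch, roughly as in an anonymous page fault handler
 * such as mm/memory.c's do_anonymous_page()):
 *
 *	page = alloc_zeroed_user_highpage_movable(vma, address);
 *	if (!page)
 *		goto oom;
 */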

static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_atomic(page, KM_USER0);
	clear_page(kaddr);
	kunmap_atomic(kaddr, KM_USER0);
}

/*
 * Zero out up to two byte ranges, [start1, end1) and [start2, end2), of a
 * (possibly highmem) page and flush the D-cache.
 */
static inline void zero_user_segments(struct page *page,
	unsigned start1, unsigned end1,
	unsigned start2, unsigned end2)
{
	void *kaddr = kmap_atomic(page, KM_USER0);

	BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);

	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_atomic(kaddr, KM_USER0);
	flush_dcache_page(page);
}

static inline void zero_user_segment(struct page *page,
	unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
	unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}
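
/*
 * Example (illustrative): a filesystem zeroing the tail of a partially
 * written page, from byte 'offset' to the end of the page:
 *
 *	zero_user_segment(page, offset, PAGE_SIZE);
 */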

/* Deprecated: callers should use zero_user() directly. */
static inline void __deprecated memclear_highpage_flush(struct page *page,
			unsigned int offset, unsigned int size)
{
	zero_user(page, offset, size);
}

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from, KM_USER0);
	vto = kmap_atomic(to, KM_USER1);
	copy_user_page(vto, vfrom, vaddr, to);
	/* unmap in reverse order: atomic kmaps nest like a stack */
	kunmap_atomic(vto, KM_USER1);
	kunmap_atomic(vfrom, KM_USER0);
}

#endif

static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from, KM_USER0);
	vto = kmap_atomic(to, KM_USER1);
	copy_page(vto, vfrom);
	kunmap_atomic(vto, KM_USER1);
	kunmap_atomic(vfrom, KM_USER0);
}

#endif /* _LINUX_HIGHMEM_H */