Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
mm, dax, gpu: convert vm_insert_mixed to pfn_t
Convert the raw unsigned long 'pfn' argument to pfn_t for the purpose of
evaluating the PFN_MAP and PFN_DEV flags. When both are set it triggers
_PAGE_DEVMAP to be set in the resulting pte. There are no functional
changes to the gpu drivers as a result of this conversion.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Hansen <dave@sr71.net>
Cc: David Airlie <airlied@linux.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 01c8f1c44b
parent 69660fd797
10 changed files with 61 additions and 14 deletions
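
The core of the change is the pfn_t type: a page frame number bundled with flag bits such as PFN_DEV and PFN_MAP, so that this information survives all the way down to insert_pfn(). For orientation, here is a minimal user-space model of that packing; the flag bit positions and the main() driver are invented for the example and do not match include/linux/pfn_t.h.

#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t val; } pfn_t;

/* Illustrative flag bits -- NOT the kernel's actual layout. */
#define PFN_DEV (1ULL << 62)
#define PFN_MAP (1ULL << 61)

/* Pack a raw pfn together with its flags, as the converted callers do. */
static pfn_t __pfn_to_pfn_t(unsigned long pfn, uint64_t flags)
{
        pfn_t t = { .val = pfn | flags };

        return t;
}

/* Recover the raw pfn by masking the flag bits back off. */
static unsigned long pfn_t_to_pfn(pfn_t pfn)
{
        return (unsigned long)(pfn.val & ~(PFN_DEV | PFN_MAP));
}

int main(void)
{
        pfn_t pfn = __pfn_to_pfn_t(0x1234, PFN_DEV);

        printf("raw pfn: 0x%lx\n", pfn_t_to_pfn(pfn));
        printf("carries PFN_DEV: %d\n", (pfn.val & PFN_DEV) != 0);
        printf("carries PFN_MAP: %d\n", (pfn.val & PFN_MAP) != 0);
        return 0;
}

The consumer that matters for this commit is insert_pfn() in mm/memory.c (last file below): when both PFN_DEV and PFN_MAP are set, it switches from pte_mkspecial() to pte_mkdevmap().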
arch/x86/include/asm/pgtable.h
@@ -247,6 +247,11 @@ static inline pte_t pte_mkspecial(pte_t pte)
         return pte_set_flags(pte, _PAGE_SPECIAL);
 }
 
+static inline pte_t pte_mkdevmap(pte_t pte)
+{
+        return pte_set_flags(pte, _PAGE_SPECIAL|_PAGE_DEVMAP);
+}
+
 static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
 {
         pmdval_t v = native_pmd_val(pmd);
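
The only architecture change is this x86 helper: pte_mkdevmap() ORs _PAGE_DEVMAP (together with _PAGE_SPECIAL) into the pte's flag bits. Below is a small stand-alone sketch of that flag manipulation, with placeholder bit positions rather than x86's real _PAGE_* values.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t pteval_t;
typedef struct { pteval_t pte; } pte_t;

/* Placeholder bits, not the real x86 definitions. */
#define _PAGE_SPECIAL (1ULL << 9)
#define _PAGE_DEVMAP  (1ULL << 10)

static pte_t pte_set_flags(pte_t pte, pteval_t set)
{
        pte.pte |= set;
        return pte;
}

static pte_t pte_mkspecial(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_SPECIAL);
}

/* New in this patch: mark the pte as both special and device-mapped. */
static pte_t pte_mkdevmap(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_SPECIAL | _PAGE_DEVMAP);
}

int main(void)
{
        pte_t pte = { .pte = 0x1234000 };

        printf("special pte: 0x%llx\n", (unsigned long long)pte_mkspecial(pte).pte);
        printf("devmap  pte: 0x%llx\n", (unsigned long long)pte_mkdevmap(pte).pte);
        return 0;
}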
drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -14,6 +14,7 @@
 
 #include <linux/shmem_fs.h>
 #include <linux/dma-buf.h>
+#include <linux/pfn_t.h>
 #include <drm/exynos_drm.h>
 
 #include "exynos_drm_drv.h"
@@ -490,7 +491,8 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
         }
 
         pfn = page_to_pfn(exynos_gem->pages[page_offset]);
-        ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
+        ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
+                        __pfn_to_pfn_t(pfn, PFN_DEV));
 
 out:
         switch (ret) {
drivers/gpu/drm/gma500/framebuffer.c
@@ -21,6 +21,7 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/string.h>
+#include <linux/pfn_t.h>
 #include <linux/mm.h>
 #include <linux/tty.h>
 #include <linux/slab.h>
@@ -132,7 +133,8 @@ static int psbfb_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
         for (i = 0; i < page_num; i++) {
                 pfn = (phys_addr >> PAGE_SHIFT);
 
-                ret = vm_insert_mixed(vma, address, pfn);
+                ret = vm_insert_mixed(vma, address,
+                                __pfn_to_pfn_t(pfn, PFN_DEV));
                 if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
                         break;
                 else if (unlikely(ret != 0)) {
drivers/gpu/drm/msm/msm_gem.c
@@ -18,6 +18,7 @@
 #include <linux/spinlock.h>
 #include <linux/shmem_fs.h>
 #include <linux/dma-buf.h>
+#include <linux/pfn_t.h>
 
 #include "msm_drv.h"
 #include "msm_gem.h"
@@ -222,7 +223,8 @@ int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
         VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
                         pfn, pfn << PAGE_SHIFT);
 
-        ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
+        ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
+                        __pfn_to_pfn_t(pfn, PFN_DEV));
 
 out_unlock:
         mutex_unlock(&dev->struct_mutex);
drivers/gpu/drm/omapdrm/omap_gem.c
@@ -19,6 +19,7 @@
 
 #include <linux/shmem_fs.h>
 #include <linux/spinlock.h>
+#include <linux/pfn_t.h>
 
 #include <drm/drm_vma_manager.h>
 
@@ -385,7 +386,8 @@ static int fault_1d(struct drm_gem_object *obj,
         VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
                         pfn, pfn << PAGE_SHIFT);
 
-        return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
+        return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
+                        __pfn_to_pfn_t(pfn, PFN_DEV));
 }
 
 /* Special handling for the case of faulting in 2d tiled buffers */
@@ -478,7 +480,8 @@ static int fault_2d(struct drm_gem_object *obj,
                         pfn, pfn << PAGE_SHIFT);
 
         for (i = n; i > 0; i--) {
-                vm_insert_mixed(vma, (unsigned long)vaddr, pfn);
+                vm_insert_mixed(vma, (unsigned long)vaddr,
+                                __pfn_to_pfn_t(pfn, PFN_DEV));
                 pfn += usergart[fmt].stride_pfn;
                 vaddr += PAGE_SIZE * m;
         }
drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -35,6 +35,7 @@
 #include <ttm/ttm_placement.h>
 #include <drm/drm_vma_manager.h>
 #include <linux/mm.h>
+#include <linux/pfn_t.h>
 #include <linux/rbtree.h>
 #include <linux/module.h>
 #include <linux/uaccess.h>
@@ -229,7 +230,8 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                 }
 
                 if (vma->vm_flags & VM_MIXEDMAP)
-                        ret = vm_insert_mixed(&cvma, address, pfn);
+                        ret = vm_insert_mixed(&cvma, address,
+                                        __pfn_to_pfn_t(pfn, PFN_DEV));
                 else
                         ret = vm_insert_pfn(&cvma, address, pfn);
 
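
Note that every gpu call site above wraps its pfn as __pfn_to_pfn_t(pfn, PFN_DEV), i.e. with PFN_DEV but without PFN_MAP. pfn_t_devmap() therefore evaluates to false for these mappings and insert_pfn() keeps using pte_mkspecial(), which is why the changelog can state that the conversion is not a functional change for the gpu drivers.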
fs/dax.c (2 changed lines)
@@ -363,7 +363,7 @@ static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
         }
         dax_unmap_atomic(bdev, &dax);
 
-        error = vm_insert_mixed(vma, vaddr, pfn_t_to_pfn(dax.pfn));
+        error = vm_insert_mixed(vma, vaddr, dax.pfn);
 
 out:
         i_mmap_unlock_read(mapping);
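
The DAX side is where the flags start to matter: dax.pfn is already a pfn_t (filled in by the block device's direct_access path), and the old code flattened it to a raw pfn with pfn_t_to_pfn() before insertion. Passing the pfn_t through unchanged lets any PFN_DEV and PFN_MAP flags it carries reach insert_pfn(), which can then set _PAGE_DEVMAP in the pte.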
include/linux/mm.h
@@ -2107,7 +2107,7 @@ int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
                         unsigned long pfn);
 int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
-                        unsigned long pfn);
+                        pfn_t pfn);
 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
 
 
include/linux/pfn_t.h
@@ -64,4 +64,31 @@ static inline pfn_t page_to_pfn_t(struct page *page)
 {
         return pfn_to_pfn_t(page_to_pfn(page));
 }
+
+static inline int pfn_t_valid(pfn_t pfn)
+{
+        return pfn_valid(pfn_t_to_pfn(pfn));
+}
+
+#ifdef CONFIG_MMU
+static inline pte_t pfn_t_pte(pfn_t pfn, pgprot_t pgprot)
+{
+        return pfn_pte(pfn_t_to_pfn(pfn), pgprot);
+}
+#endif
+
+#ifdef __HAVE_ARCH_PTE_DEVMAP
+static inline bool pfn_t_devmap(pfn_t pfn)
+{
+        const unsigned long flags = PFN_DEV|PFN_MAP;
+
+        return (pfn.val & flags) == flags;
+}
+#else
+static inline bool pfn_t_devmap(pfn_t pfn)
+{
+        return false;
+}
+pte_t pte_mkdevmap(pte_t pte);
+#endif
 #endif /* _LINUX_PFN_T_H_ */
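
A detail worth calling out in the header: pfn_t_devmap() only does a real flag check when the architecture defines __HAVE_ARCH_PTE_DEVMAP; otherwise it compiles to a stub that always returns false, so generic code can call it unconditionally and never reaches pte_mkdevmap(). Below is a rough user-space model of that compile-time selection; the HAVE_PTE_DEVMAP_MODEL macro, the bit positions and main() are invented for the example.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t val; } pfn_t;

#define PFN_DEV (1ULL << 62)   /* illustrative bit positions */
#define PFN_MAP (1ULL << 61)

/* Set to 0 to mimic an architecture without __HAVE_ARCH_PTE_DEVMAP. */
#define HAVE_PTE_DEVMAP_MODEL 1

#if HAVE_PTE_DEVMAP_MODEL
/* Devmap means both PFN_DEV and PFN_MAP are set, as in the hunk above. */
static bool pfn_t_devmap(pfn_t pfn)
{
        const uint64_t flags = PFN_DEV | PFN_MAP;

        return (pfn.val & flags) == flags;
}
#else
/* No arch support: the predicate collapses to "never a devmap pfn". */
static bool pfn_t_devmap(pfn_t pfn)
{
        (void)pfn;
        return false;
}
#endif

int main(void)
{
        pfn_t dev_only = { .val = 0x1234 | PFN_DEV };
        pfn_t dev_map  = { .val = 0x1234 | PFN_DEV | PFN_MAP };

        printf("PFN_DEV only:    devmap=%d\n", pfn_t_devmap(dev_only));
        printf("PFN_DEV|PFN_MAP: devmap=%d\n", pfn_t_devmap(dev_map));
        return 0;
}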
mm/memory.c (16 changed lines)
@@ -50,6 +50,7 @@
 #include <linux/export.h>
 #include <linux/delayacct.h>
 #include <linux/init.h>
+#include <linux/pfn_t.h>
 #include <linux/writeback.h>
 #include <linux/memcontrol.h>
 #include <linux/mmu_notifier.h>
@@ -1500,7 +1501,7 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
 EXPORT_SYMBOL(vm_insert_page);
 
 static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
-                        unsigned long pfn, pgprot_t prot)
+                        pfn_t pfn, pgprot_t prot)
 {
         struct mm_struct *mm = vma->vm_mm;
         int retval;
@@ -1516,7 +1517,10 @@ static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
                 goto out_unlock;
 
         /* Ok, finally just insert the thing.. */
-        entry = pte_mkspecial(pfn_pte(pfn, prot));
+        if (pfn_t_devmap(pfn))
+                entry = pte_mkdevmap(pfn_t_pte(pfn, prot));
+        else
+                entry = pte_mkspecial(pfn_t_pte(pfn, prot));
         set_pte_at(mm, addr, pte, entry);
         update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
 
@@ -1566,14 +1570,14 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
         if (track_pfn_insert(vma, &pgprot, pfn))
                 return -EINVAL;
 
-        ret = insert_pfn(vma, addr, pfn, pgprot);
+        ret = insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot);
 
         return ret;
 }
 EXPORT_SYMBOL(vm_insert_pfn);
 
 int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
-                        unsigned long pfn)
+                        pfn_t pfn)
 {
         BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
 
@@ -1587,10 +1591,10 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
          * than insert_pfn).  If a zero_pfn were inserted into a VM_MIXEDMAP
          * without pte special, it would there be refcounted as a normal page.
          */
-        if (!HAVE_PTE_SPECIAL && pfn_valid(pfn)) {
+        if (!HAVE_PTE_SPECIAL && pfn_t_valid(pfn)) {
                 struct page *page;
 
-                page = pfn_to_page(pfn);
+                page = pfn_t_to_page(pfn);
                 return insert_page(vma, addr, page, vma->vm_page_prot);
         }
         return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
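
Putting the pieces together: insert_pfn() now receives the pfn_t intact and keys the pte type off its flags, so a devmap pfn (PFN_DEV|PFN_MAP, e.g. from DAX) gets _PAGE_DEVMAP via pte_mkdevmap(), everything else keeps the previous pte_mkspecial() behaviour, and vm_insert_pfn() simply wraps its raw pfn with PFN_DEV to stay on the old path. The stand-alone sketch below mirrors just that decision, reusing the toy types and invented bit values from the earlier sketches; it is not kernel code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t val; } pfn_t;
typedef struct { uint64_t pte; } pte_t;

#define PFN_DEV       (1ULL << 62)
#define PFN_MAP       (1ULL << 61)
#define _PAGE_SPECIAL (1ULL << 9)
#define _PAGE_DEVMAP  (1ULL << 10)

static bool pfn_t_devmap(pfn_t pfn)
{
        const uint64_t flags = PFN_DEV | PFN_MAP;

        return (pfn.val & flags) == flags;
}

/* Strip the flag bits and shift the pfn into the (toy) pte layout. */
static pte_t pfn_t_pte(pfn_t pfn, uint64_t prot)
{
        pte_t pte = { .pte = ((pfn.val & ~(PFN_DEV | PFN_MAP)) << 12) | prot };

        return pte;
}

static pte_t pte_mkspecial(pte_t pte) { pte.pte |= _PAGE_SPECIAL; return pte; }
static pte_t pte_mkdevmap(pte_t pte)  { pte.pte |= _PAGE_SPECIAL | _PAGE_DEVMAP; return pte; }

/* The decision insert_pfn() now makes for each inserted pfn. */
static pte_t make_entry(pfn_t pfn, uint64_t prot)
{
        if (pfn_t_devmap(pfn))
                return pte_mkdevmap(pfn_t_pte(pfn, prot));
        return pte_mkspecial(pfn_t_pte(pfn, prot));
}

int main(void)
{
        pfn_t gpu = { .val = 0x1234 | PFN_DEV };            /* gpu fault paths */
        pfn_t dax = { .val = 0x1234 | PFN_DEV | PFN_MAP };  /* dax fault path  */

        printf("gpu entry: 0x%llx\n", (unsigned long long)make_entry(gpu, 0x67).pte);
        printf("dax entry: 0x%llx\n", (unsigned long long)make_entry(dax, 0x67).pte);
        return 0;
}

With these toy values the gpu entry and the dax entry differ only by the _PAGE_DEVMAP bit, which is the whole behavioural change this commit enables.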