Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git (synced 2024-11-01 17:08:10 +00:00).
478a1469a7
- We use a bit in an exceptional radix tree entry as a lock bit and use it similarly to how page lock is used for normal faults. This fixes races between hole instantiation and read faults of the same index. - Filesystem DAX PMD faults are disabled, and will be re-enabled when PMD locking is implemented. -----BEGIN PGP SIGNATURE----- Version: GnuPG v1 iQIcBAABAgAGBQJXRKwLAAoJEJ/BjXdf9fLB+BkP/3HBm05KlAKDklvnBIPFDMUK hA7g2K6vuvaEDZXZQ1ioc1Ajf1sCpVip7shXJsojZqwWmRz0/4nneF7ytluW9AjS dBX+0qCgKGH1fnwyGFF+MN7fuj7kGrSDz34lG0OObRN6/oKiVNb2svXiYKkT6J6C AgsWlWRUpMy9jrn1u/FduMjDhk92Z3ojarexuicr0i8NUlBClCIrdCEmUMi4orSB DuiIjestLOc7+mERBUwrXkzoh9v8Z0FpIgnDLWwpeEkAvJwWkGe5eXrBJwF+hEbi RYfTrOYc7bBQLo22LRb8pdighjrx3OW9EpNCfEmLDOjM3cYBbMK/d2i/ww52H6IK Mw6iS5rXdGgJtQIGL8N96HLFk+cDyZ8J8xNUCwbYYBJqgpMzxzVkL3vTm72tyFnl InWhih+miCMbBPytQSRd6+1wZG2piJTv6SsFTd5K1OaiRmJhBJZG47t2QTBRBu7Y 5A4FGPtlraV+iDJvD6VLO1Tp8twxdLluOJ2BwdGeiKXiGh6LP+FGGFF3aFa5N4Ro xSslCTX7Q1G66zXQwD4+IMWLwS1FDNymPkUSsF6RQo6qfAnl9SrmYTc4xJ4QXy92 sUdrWEz2OBTfxKNqbGyc/KrXKZT3RnEkJNft8snB2h6WTCdOPaNYs/yETUwiwkSc CXpuQFrxm69QYwNsqVu1 =Pkd0 -----END PGP SIGNATURE----- Merge tag 'dax-locking-for-4.7' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm Pull DAX locking updates from Ross Zwisler: "Filesystem DAX locking for 4.7 - We use a bit in an exceptional radix tree entry as a lock bit and use it similarly to how page lock is used for normal faults. This fixes races between hole instantiation and read faults of the same index. 
- Filesystem DAX PMD faults are disabled, and will be re-enabled when PMD locking is implemented" * tag 'dax-locking-for-4.7' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm: dax: Remove i_mmap_lock protection dax: Use radix tree entry lock to protect cow faults dax: New fault locking dax: Allow DAX code to replace exceptional entries dax: Define DAX lock bit for radix tree exceptional entry dax: Make huge page handling depend of CONFIG_BROKEN dax: Fix condition for filling of PMD holes
76 lines · 2.6 KiB · C
#ifndef _LINUX_DAX_H
#define _LINUX_DAX_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/radix-tree.h>
#include <asm/pgtable.h>

/*
 * We use the lowest available exceptional entry bit in a radix tree
 * entry as a lock bit, analogous to the page lock for normal faults.
 * This serializes hole instantiation against read faults on the same
 * index.
 */
#define RADIX_DAX_ENTRY_LOCK	(1 << RADIX_TREE_EXCEPTIONAL_SHIFT)

/* Direct I/O through the DAX (page-cache-bypassing) path. */
ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *,
		  get_block_t, dio_iodone_t, int flags);
/* Zero a sub-page byte range of a DAX inode's backing storage. */
int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
/* Zero the partial block at the end of a truncated DAX file. */
int dax_truncate_page(struct inode *, loff_t from, get_block_t);
/* PTE fault handlers for DAX mappings (__dax_fault: caller holds locks). */
int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t);
int __dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t);
/* Remove the (exceptional) radix tree entry at @index from @mapping. */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
/* Wake waiters blocked on the locked entry at @index (all if @wake_all). */
void dax_wake_mapping_entry_waiter(struct address_space *mapping,
		pgoff_t index, bool wake_all);
#ifdef CONFIG_FS_DAX
|
|
struct page *read_dax_sector(struct block_device *bdev, sector_t n);
|
|
void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index);
|
|
int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
|
|
unsigned int offset, unsigned int length);
|
|
#else
|
|
static inline struct page *read_dax_sector(struct block_device *bdev,
|
|
sector_t n)
|
|
{
|
|
return ERR_PTR(-ENXIO);
|
|
}
|
|
/* Shouldn't ever be called when dax is disabled. */
|
|
static inline void dax_unlock_mapping_entry(struct address_space *mapping,
|
|
pgoff_t index)
|
|
{
|
|
BUG();
|
|
}
|
|
static inline int __dax_zero_page_range(struct block_device *bdev,
|
|
sector_t sector, unsigned int offset, unsigned int length)
|
|
{
|
|
return -ENXIO;
|
|
}
|
|
#endif
|
|
|
|
#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
|
|
int dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *,
|
|
unsigned int flags, get_block_t);
|
|
int __dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *,
|
|
unsigned int flags, get_block_t);
|
|
#else
|
|
static inline int dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
|
|
pmd_t *pmd, unsigned int flags, get_block_t gb)
|
|
{
|
|
return VM_FAULT_FALLBACK;
|
|
}
|
|
#define __dax_pmd_fault dax_pmd_fault
|
|
#endif
|
|
/* Handle a write fault on an already-present read-only DAX PTE. */
int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *);
/* Write faults take the same path as ordinary DAX faults. */
#define dax_mkwrite(vma, vmf, gb)	dax_fault(vma, vmf, gb)
#define __dax_mkwrite(vma, vmf, gb)	__dax_fault(vma, vmf, gb)
static inline bool vma_is_dax(struct vm_area_struct *vma)
|
|
{
|
|
return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host);
|
|
}
|
|
|
|
static inline bool dax_mapping(struct address_space *mapping)
|
|
{
|
|
return mapping->host && IS_DAX(mapping->host);
|
|
}
|
|
|
|
struct writeback_control;
/* Flush dirty DAX entries of @mapping on @bdev for writeback (@wbc range). */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc);
#endif