mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-10-05 00:20:32 +00:00
memfd: fix F_SEAL_WRITE after shmem huge page allocated
commit f2b277c4d1
upstream.
Wangyong reports: after enabling tmpfs filesystem to support transparent
hugepage with the following command:
echo always > /sys/kernel/mm/transparent_hugepage/shmem_enabled
the docker program tries to add F_SEAL_WRITE through the following
command, but it fails unexpectedly with errno EBUSY:
fcntl(5, F_ADD_SEALS, F_SEAL_WRITE) = -1.
That is because memfd_tag_pins() and memfd_wait_for_pins() were never
updated for shmem huge pages: checking page_mapcount() against
page_count() is hopeless on THP subpages - they need to check
total_mapcount() against page_count() on THP heads only.
Make memfd_tag_pins() (compared > 1) as strict as memfd_wait_for_pins()
(compared != 1): either can be justified, but given the non-atomic
total_mapcount() calculation, it is better now to be strict. Bear in
mind that total_mapcount() itself scans all of the THP subpages, when
choosing to take an XA_CHECK_SCHED latency break.
Also fix the unlikely xa_is_value() case in memfd_wait_for_pins(): if a
page has been swapped out since memfd_tag_pins(), then its refcount must
have fallen, and so it can safely be untagged.
Link: https://lkml.kernel.org/r/a4f79248-df75-2c8c-3df-ba3317ccb5da@google.com
Signed-off-by: Hugh Dickins <hughd@google.com>
Reported-by: Zeal Robot <zealci@zte.com.cn>
Reported-by: wangyong <wang.yong12@zte.com.cn>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: CGEL ZTE <cgel.zte@gmail.com>
Cc: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Song Liu <songliubraving@fb.com>
Cc: Yang Yang <yang.yang29@zte.com.cn>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
parent
a472c95af5
commit
463c2accd6
1 changed file with 22 additions and 8 deletions
mm/memfd.c | 30 +++++++++++++++++++-----------
1 file changed, 22 insertions(+), 8 deletions(-)

diff --git a/mm/memfd.c b/mm/memfd.c
--- a/mm/memfd.c
+++ b/mm/memfd.c
@@ -34,26 +34,35 @@ static void memfd_tag_pins(struct address_space *mapping)
 	void __rcu **slot;
 	pgoff_t start;
 	struct page *page;
-	unsigned int tagged = 0;
+	int latency = 0;
+	int cache_count;
 
 	lru_add_drain();
 	start = 0;
 
 	xa_lock_irq(&mapping->i_pages);
 	radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) {
+		cache_count = 1;
 		page = radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock);
-		if (!page || radix_tree_exception(page)) {
+		if (!page || radix_tree_exception(page) || PageTail(page)) {
 			if (radix_tree_deref_retry(page)) {
 				slot = radix_tree_iter_retry(&iter);
 				continue;
 			}
-		} else if (page_count(page) - page_mapcount(page) > 1) {
-			radix_tree_tag_set(&mapping->i_pages, iter.index,
-					   MEMFD_TAG_PINNED);
+		} else {
+			if (PageTransHuge(page) && !PageHuge(page))
+				cache_count = HPAGE_PMD_NR;
+			if (cache_count !=
+			    page_count(page) - total_mapcount(page)) {
+				radix_tree_tag_set(&mapping->i_pages,
+					iter.index, MEMFD_TAG_PINNED);
+			}
 		}
 
-		if (++tagged % 1024)
+		latency += cache_count;
+		if (latency < 1024)
 			continue;
+		latency = 0;
 
 		slot = radix_tree_iter_resume(slot, &iter);
 		xa_unlock_irq(&mapping->i_pages);
@@ -79,6 +88,7 @@ static int memfd_wait_for_pins(struct address_space *mapping)
 	pgoff_t start;
 	struct page *page;
 	int error, scan;
+	int cache_count;
 
 	memfd_tag_pins(mapping);
 
@@ -107,8 +117,12 @@ static int memfd_wait_for_pins(struct address_space *mapping)
 				page = NULL;
 			}
 
-			if (page &&
-			    page_count(page) - page_mapcount(page) != 1) {
+			cache_count = 1;
+			if (page && PageTransHuge(page) && !PageHuge(page))
+				cache_count = HPAGE_PMD_NR;
+
+			if (page && cache_count !=
+			    page_count(page) - total_mapcount(page)) {
 				if (scan < LAST_SCAN)
 					goto continue_resched;
Loading…
Reference in a new issue