diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index d72e74e007fc..6b9d39d65b73 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1941,16 +1941,6 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 					result = SCAN_FAIL;
 					goto xa_locked;
 				}
-				xas_store(&xas, hpage);
-				if (xas_error(&xas)) {
-					/* revert shmem_charge performed
-					 * in the previous condition
-					 */
-					mapping->nrpages--;
-					shmem_uncharge(mapping->host, 1);
-					result = SCAN_STORE_FAILED;
-					goto xa_locked;
-				}
 				nr_none++;
 				continue;
 			}
@@ -2105,13 +2095,6 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 		 * Accumulate the pages that are being collapsed.
 		 */
 		list_add_tail(&page->lru, &pagelist);
-
-		/*
-		 * We can't get an ENOMEM here (because the allocation happened
-		 * before) but let's check for errors (XArray implementation
-		 * can be changed in the future)
-		 */
-		WARN_ON_ONCE(xas_error(&xas));
 		continue;
 out_unlock:
 		unlock_page(page);
@@ -2134,11 +2117,6 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 		}
 	}
 
-	/* Here we can't get an ENOMEM (because entries were
-	 * previously allocated) But let's check for errors
-	 * (XArray implementation can be changed in the future)
-	 */
-	WARN_ON_ONCE(xas_error(&xas));
 xa_locked:
 	xas_unlock_irq(&xas);
 xa_unlocked:
@@ -2259,6 +2237,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 	/* Join all the small entries into a single multi-index entry. */
 	xas_set_order(&xas, start, HPAGE_PMD_ORDER);
 	xas_store(&xas, hpage);
+	WARN_ON_ONCE(xas_error(&xas));
 	xas_unlock_irq(&xas);
 
 	/*
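
For reference, the checks removed above rest on the reasoning the dropped comments allude to: xas_store() typically fails only with -ENOMEM from node allocation, and callers retry that allocation with xas_nomem() outside the lock, so a store into a range whose nodes already exist is not expected to fail. Below is a minimal, illustrative sketch of that standard XArray store idiom; it is not part of this patch, and store_entry() is a hypothetical helper used only for the example.

#include <linux/xarray.h>

/*
 * Store @entry at @index, retrying the node allocation outside the lock.
 * Once the backing nodes exist, repeating the store at the same index is
 * not expected to hit -ENOMEM again.
 */
static int store_entry(struct xarray *xa, unsigned long index, void *entry)
{
	XA_STATE(xas, xa, index);

	do {
		xas_lock(&xas);
		xas_store(&xas, entry);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	return xas_error(&xas);
}

With the per-index stores gone, the final multi-index xas_store() is the only remaining store that could report an error, which is presumably why a single WARN_ON_ONCE(xas_error(&xas)) is added after it.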