mm/hugetlb: compute/return the number of regions added by region_add()

Modify region_add() to keep track of the number of regions (pages) added to
the reserve map, and return this count.  The return value can be compared to
the return value of region_chg() to determine if the map was modified
between the two calls.
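
For illustration, a caller wanting to detect such a modification could
compare the two return values along these lines (a minimal sketch, not code
from this patch; assume resv, f, and t are in scope):

	/* hypothetical caller, illustrating the return-value contract */
	long chg, add;

	chg = region_chg(resv, f, t);	/* pages a later region_add would add */
	if (chg < 0)
		return chg;

	/* ... the reserve map may be changed by others here ... */

	add = region_add(resv, f, t);	/* pages actually added */
	if (add != chg) {
		/* the map changed between the calls; the caller must
		 * reconcile the global counts by the difference
		 */
	}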

Make vma_commit_reservation() also pass along the return value of
region_add().  In the normal case, we want vma_commit_reservation to
return the same value as the preceding call to vma_needs_reservation.
Create a common __vma_reservation_common() routine to help keep the
special-case return values in sync.

Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: David Rientjes <rientjes@google.com>
Cc: Luiz Capitulino <lcapitulino@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Commit cf3ad20bfe (parent 1dd308a7b4)
Mike Kravetz, 2015-06-24 16:57:55 -07:00; committed by Linus Torvalds
1 changed file with 48 additions and 24 deletions

@@ -245,11 +245,15 @@ struct file_region {
  * expanded, because region_add is only called after region_chg
  * with the same range.  If a new file_region structure must
  * be allocated, it is done in region_chg.
+ *
+ * Return the number of new huge pages added to the map.  This
+ * number is greater than or equal to zero.
  */
 static long region_add(struct resv_map *resv, long f, long t)
 {
 	struct list_head *head = &resv->regions;
 	struct file_region *rg, *nrg, *trg;
+	long add = 0;
 
 	spin_lock(&resv->lock);
 	/* Locate the region we are either in or before. */
@@ -275,14 +279,24 @@ static long region_add(struct resv_map *resv, long f, long t)
 		if (rg->to > t)
 			t = rg->to;
 		if (rg != nrg) {
+			/* Decrement return value by the deleted range.
+			 * Another range will span this area so that by
+			 * end of routine add will be >= zero
+			 */
+			add -= (rg->to - rg->from);
 			list_del(&rg->link);
 			kfree(rg);
 		}
 	}
+
+	add += (nrg->from - f);		/* Added to beginning of region */
 	nrg->from = f;
+	add += t - nrg->to;		/* Added to end of region */
 	nrg->to = t;
+
 	spin_unlock(&resv->lock);
-	return 0;
+	VM_BUG_ON(add < 0);
+	return add;
 }
 
 /*
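
As a worked example (hypothetical map contents): if the reserve map holds
[2,4) and [6,8), then region_add(resv, 0, 10) absorbs the second region
(add -= 2), extends nrg from [2,4) to [0,10), and credits the growth at
both ends (add += 2 at the front, add += 6 at the back).  The function
returns -2 + 2 + 6 = 6, exactly the number of pages in [0,10) that were
not previously reserved.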
@@ -1470,46 +1484,56 @@ static void return_unused_surplus_pages(struct hstate *h,
 }
 
 /*
- * Determine if the huge page at addr within the vma has an associated
- * reservation.  Where it does not we will need to logically increase
- * reservation and actually increase subpool usage before an allocation
- * can occur.  Where any new reservation would be required the
- * reservation change is prepared, but not committed.  Once the page
- * has been allocated from the subpool and instantiated the change should
- * be committed via vma_commit_reservation.  No action is required on
- * failure.
+ * vma_needs_reservation and vma_commit_reservation are used by the huge
+ * page allocation routines to manage reservations.
+ *
+ * vma_needs_reservation is called to determine if the huge page at addr
+ * within the vma has an associated reservation.  If a reservation is
+ * needed, the value 1 is returned.  The caller is then responsible for
+ * managing the global reservation and subpool usage counts.  After
+ * the huge page has been allocated, vma_commit_reservation is called
+ * to add the page to the reservation map.
+ *
+ * In the normal case, vma_commit_reservation returns the same value
+ * as the preceding vma_needs_reservation call.  The only time this
+ * is not the case is if a reserve map was changed between calls.  It
+ * is the responsibility of the caller to notice the difference and
+ * take appropriate action.
  */
-static long vma_needs_reservation(struct hstate *h,
-			struct vm_area_struct *vma, unsigned long addr)
+static long __vma_reservation_common(struct hstate *h,
+				struct vm_area_struct *vma, unsigned long addr,
+				bool commit)
 {
 	struct resv_map *resv;
 	pgoff_t idx;
-	long chg;
+	long ret;
 
 	resv = vma_resv_map(vma);
 	if (!resv)
 		return 1;
 
 	idx = vma_hugecache_offset(h, vma, addr);
-	chg = region_chg(resv, idx, idx + 1);
+	if (commit)
+		ret = region_add(resv, idx, idx + 1);
+	else
+		ret = region_chg(resv, idx, idx + 1);
 
 	if (vma->vm_flags & VM_MAYSHARE)
-		return chg;
+		return ret;
 	else
-		return chg < 0 ? chg : 0;
+		return ret < 0 ? ret : 0;
 }
 
-static void vma_commit_reservation(struct hstate *h,
+static long vma_needs_reservation(struct hstate *h,
 			struct vm_area_struct *vma, unsigned long addr)
 {
-	struct resv_map *resv;
-	pgoff_t idx;
+	return __vma_reservation_common(h, vma, addr, false);
+}
 
-	resv = vma_resv_map(vma);
-	if (!resv)
-		return;
+static long vma_commit_reservation(struct hstate *h,
+			struct vm_area_struct *vma, unsigned long addr)
+{
+	return __vma_reservation_common(h, vma, addr, true);
 
-	idx = vma_hugecache_offset(h, vma, addr);
-	region_add(resv, idx, idx + 1);
 }
 
 static struct page *alloc_huge_page(struct vm_area_struct *vma,
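
To make the intended calling convention concrete, here is a minimal sketch
of how an allocation path might pair the two wrappers (hypothetical caller
code, not part of this patch; the variable names and the elided allocation
steps are illustrative):

	/* hypothetical allocation-path caller */
	long map_chg, map_commit;

	map_chg = vma_needs_reservation(h, vma, addr);
	if (map_chg < 0)
		return ERR_PTR(-ENOMEM);

	/* ... charge the subpool and allocate the huge page ... */

	map_commit = vma_commit_reservation(h, vma, addr);
	if (unlikely(map_chg != map_commit)) {
		/* the reserve map was changed between the two calls;
		 * adjust the global reservation and subpool counts
		 * by the difference
		 */
	}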