drm/ttm: rework no_retry handling v2

During eviction we do want to trigger the OOM killer.

Only while doing new allocations should we try to avoid that and
return -ENOMEM to the application instead.

v2: rename the flag to gfp_retry_mayfail.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/398685/
commit 586052b0a6
parent d74252bb8f
Christian König <christian.koenig@amd.com>, 2020-11-02 13:01:53 +01:00
6 changed files with 5 additions and 14 deletions
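
For context only (not part of this patch): a minimal sketch of how a driver-side
allocation path can use the new flag once this change is applied. The helper name
example_validate_user_bo is made up for illustration; struct ttm_operation_ctx and
ttm_bo_validate() are the real TTM interfaces touched below. Eviction paths simply
leave gfp_retry_mayfail unset, so the OOM killer can still be triggered there.

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>

/* Hypothetical driver helper: place a userspace-visible BO while opting
 * out of the OOM killer for its system page allocations. */
static int example_validate_user_bo(struct ttm_buffer_object *bo,
                                    struct ttm_placement *placement)
{
        struct ttm_operation_ctx ctx = {
                .interruptible = true,
                .no_wait_gpu = false,
                /* new allocation: fail with -ENOMEM instead of OOM killing */
                .gfp_retry_mayfail = true,
        };

        return ttm_bo_validate(bo, placement, &ctx);
}

A failed system page allocation then surfaces to the application as -ENOMEM,
while TTM's internal eviction and swap paths, which pass a context without the
flag, keep the old behaviour.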

drivers/gpu/drm/amd/amdgpu/amdgpu_object.c

@@ -516,6 +516,8 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
 	struct ttm_operation_ctx ctx = {
 		.interruptible = (bp->type != ttm_bo_type_kernel),
 		.no_wait_gpu = bp->no_wait_gpu,
+		/* We opt to avoid OOM on system pages allocations */
+		.gfp_retry_mayfail = true,
 		.resv = bp->resv,
 		.flags = bp->type != ttm_bo_type_kernel ?
 			TTM_OPT_FLAG_ALLOW_RES_EVICT : 0

drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c

@@ -1914,9 +1914,6 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 	}
 	adev->mman.initialized = true;
 
-	/* We opt to avoid OOM on system pages allocations */
-	adev->mman.bdev.no_retry = true;
-
 	/* Initialize VRAM pool with all of VRAM divided into pages */
 	r = amdgpu_vram_mgr_init(adev);
 	if (r) {

drivers/gpu/drm/ttm/ttm_pool.c

@@ -367,7 +367,7 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
 	if (tt->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
 		gfp_flags |= __GFP_ZERO;
 
-	if (tt->page_flags & TTM_PAGE_FLAG_NO_RETRY)
+	if (ctx->gfp_retry_mayfail)
 		gfp_flags |= __GFP_RETRY_MAYFAIL;
 
 	if (pool->use_dma32)

drivers/gpu/drm/ttm/ttm_tt.c

@@ -51,9 +51,6 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
 	if (bo->ttm)
 		return 0;
 
-	if (bdev->no_retry)
-		page_flags |= TTM_PAGE_FLAG_NO_RETRY;
-
 	switch (bo->type) {
 	case ttm_bo_type_device:
 		if (zero_alloc)
@@ -211,8 +208,6 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
 
 	swap_space = swap_storage->f_mapping;
 	gfp_mask = mapping_gfp_mask(swap_space);
-	if (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY)
-		gfp_mask |= __GFP_RETRY_MAYFAIL;
 
 	for (i = 0; i < ttm->num_pages; ++i) {
 		from_page = shmem_read_mapping_page_gfp(swap_space, i,
@@ -260,8 +255,6 @@ int ttm_tt_swapout(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
 
 	swap_space = swap_storage->f_mapping;
 	gfp_mask = mapping_gfp_mask(swap_space);
-	if (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY)
-		gfp_mask |= __GFP_RETRY_MAYFAIL;
 
 	for (i = 0; i < ttm->num_pages; ++i) {
 		from_page = ttm->pages[i];

include/drm/ttm/ttm_bo_api.h

@@ -195,6 +195,7 @@ struct ttm_bo_kmap_obj {
  *
  * @interruptible: Sleep interruptible if sleeping.
  * @no_wait_gpu: Return immediately if the GPU is busy.
+ * @gfp_retry_mayfail: Set the __GFP_RETRY_MAYFAIL flag when allocating pages.
  * @resv: Reservation object to allow reserved evictions with.
  * @flags: Including the following flags
  *
@@ -204,6 +205,7 @@ struct ttm_bo_kmap_obj {
 struct ttm_operation_ctx {
 	bool interruptible;
 	bool no_wait_gpu;
+	bool gfp_retry_mayfail;
 	struct dma_resv *resv;
 	uint64_t bytes_moved;
 	uint32_t flags;

include/drm/ttm/ttm_bo_driver.h

@@ -276,7 +276,6 @@ extern struct ttm_bo_global {
  * @dev_mapping: A pointer to the struct address_space representing the
  * device address space.
  * @wq: Work queue structure for the delayed delete workqueue.
- * @no_retry: Don't retry allocation if it fails
  *
  */
 
@@ -314,8 +313,6 @@ struct ttm_bo_device {
 	 */
 	struct delayed_work wq;
 
-	bool no_retry;
-
 };
 
 static inline struct ttm_resource_manager *ttm_manager_type(struct ttm_bo_device *bdev,
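
For reference, a sketch (illustration only, not TTM's actual pool code, which is
the ttm_pool_alloc() hunk above) of what __GFP_RETRY_MAYFAIL changes at the page
allocator level:

#include <linux/gfp.h>

/* Without __GFP_RETRY_MAYFAIL the page allocator may invoke the OOM killer
 * under memory pressure; with it, the allocation retries for a while and
 * then simply fails, which TTM reports to the application as -ENOMEM. */
static struct page *example_alloc_system_page(bool gfp_retry_mayfail)
{
        gfp_t gfp = GFP_USER;

        if (gfp_retry_mayfail)
                gfp |= __GFP_RETRY_MAYFAIL;

        return alloc_pages(gfp, 0);     /* NULL on failure, no OOM kill */
}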