linux-stable/drivers/gpu/drm/i915/gem/i915_gem_pages.c

/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2014-2016 Intel Corporation
*/
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages,
unsigned int sg_page_sizes)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
unsigned long supported = INTEL_INFO(i915)->page_sizes;
int i;
lockdep_assert_held(&obj->mm.lock);
/* Make the pages coherent with the GPU (flushing any swapin). */
if (obj->cache_dirty) {
obj->write_domain = 0;
if (i915_gem_object_has_struct_page(obj))
drm_clflush_sg(pages);
obj->cache_dirty = false;
}
obj->mm.get_page.sg_pos = pages->sgl;
obj->mm.get_page.sg_idx = 0;
obj->mm.pages = pages;
if (i915_gem_object_is_tiled(obj) &&
i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
GEM_BUG_ON(obj->mm.quirked);
__i915_gem_object_pin_pages(obj);
obj->mm.quirked = true;
}
GEM_BUG_ON(!sg_page_sizes);
obj->mm.page_sizes.phys = sg_page_sizes;
/*
* Calculate the supported page-sizes which fit into the given
* sg_page_sizes. This will give us the page-sizes which we may be able
* to use opportunistically when later inserting into the GTT. For
* example if phys=2G, then in theory we should be able to use 1G, 2M,
* 64K or 4K pages, although in practice this will depend on a number of
* other factors.
*/
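/*
 * Illustrative example: with supported = 4K | 64K | 2M and phys
 * containing only 2M chunks, the loop below yields sg = 4K | 64K | 2M,
 * i.e. every supported size no larger than the biggest size actually
 * present in the scatterlist.
 */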
obj->mm.page_sizes.sg = 0;
for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
if (obj->mm.page_sizes.phys & ~0u << i)
obj->mm.page_sizes.sg |= BIT(i);
}
GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));
if (i915_gem_object_is_shrinkable(obj)) {
struct list_head *list;
unsigned long flags;
spin_lock_irqsave(&i915->mm.obj_lock, flags);
i915->mm.shrink_count++;
i915->mm.shrink_memory += obj->base.size;
if (obj->mm.madv != I915_MADV_WILLNEED)
list = &i915->mm.purge_list;
else
list = &i915->mm.shrink_list;
list_add_tail(&obj->mm.link, list);
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
}
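/*
 * Acquire the backing pages from the object's backend via ->get_pages().
 * Called with obj->mm.lock held by the caller; objects already marked
 * purgeable are refused.
 */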
int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
int err;
if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
DRM_DEBUG("Attempting to obtain a purgeable object\n");
return -EFAULT;
}
err = obj->ops->get_pages(obj);
GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));
return err;
}
/* Ensure that the associated pages are gathered from the backing storage
* and pinned into our object. i915_gem_object_pin_pages() may be called
* multiple times before they are released by a single call to
* i915_gem_object_unpin_pages() - once the pages are no longer referenced
* either as a result of memory pressure (reaping pages under the shrinker)
* or as the object is itself released.
*/
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
int err;
err = mutex_lock_interruptible(&obj->mm.lock);
if (err)
return err;
if (unlikely(!i915_gem_object_has_pages(obj))) {
GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
err = ____i915_gem_object_get_pages(obj);
if (err)
goto unlock;
smp_mb__before_atomic();
}
atomic_inc(&obj->mm.pages_pin_count);
unlock:
mutex_unlock(&obj->mm.lock);
return err;
}
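/*
 * Typical usage of the pin/unpin API (sketch, for illustration only):
 *
 *	err = i915_gem_object_pin_pages(obj);
 *	if (err)
 *		return err;
 *	... access the pages, e.g. via i915_gem_object_get_page() ...
 *	i915_gem_object_unpin_pages(obj);
 */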
/* Immediately discard the backing storage */
void i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
drm_gem_free_mmap_offset(&obj->base);
if (obj->ops->truncate)
obj->ops->truncate(obj);
}
/* Try to discard unwanted pages */
void i915_gem_object_writeback(struct drm_i915_gem_object *obj)
{
lockdep_assert_held(&obj->mm.lock);
GEM_BUG_ON(i915_gem_object_has_pages(obj));
if (obj->ops->writeback)
obj->ops->writeback(obj);
}
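/* Forget all cached page lookups held in the get_page radix tree. */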
static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
{
struct radix_tree_iter iter;
void __rcu **slot;
rcu_read_lock();
radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
radix_tree_delete(&obj->mm.get_page.radix, iter.index);
rcu_read_unlock();
}
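/*
 * Detach the sg_table from the object: pull it off the shrinker lists,
 * tear down any kernel mapping and the cached page iterator, and return
 * the pages to the caller (normally so the backend can release them).
 */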
struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
struct sg_table *pages;
pages = fetch_and_zero(&obj->mm.pages);
if (IS_ERR_OR_NULL(pages))
return pages;
i915_gem_object_make_unshrinkable(obj);
if (obj->mm.mapping) {
void *ptr;
ptr = page_mask_bits(obj->mm.mapping);
if (is_vmalloc_addr(ptr))
vunmap(ptr);
else
kunmap(kmap_to_page(ptr));
obj->mm.mapping = NULL;
}
__i915_gem_object_reset_page_iter(obj);
obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
return pages;
}
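/*
 * Hand the object's pages back to the backing storage, unless they are
 * still pinned, in which case -EBUSY is returned.
 */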
int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
enum i915_mm_subclass subclass)
{
struct sg_table *pages;
int err;
if (i915_gem_object_has_pinned_pages(obj))
return -EBUSY;
GEM_BUG_ON(atomic_read(&obj->bind_count));
/* May be called by shrinker from within get_pages() (on another bo) */
mutex_lock_nested(&obj->mm.lock, subclass);
if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
err = -EBUSY;
goto unlock;
}
/*
* ->put_pages might need to allocate memory for the bit17 swizzle
* array, hence protect them from being reaped by removing them from gtt
* lists early.
*/
pages = __i915_gem_object_unset_pages(obj);
/*
* XXX Temporary hijinx to avoid updating all backends to handle
* NULL pages. In the future, when we have more asynchronous
* get_pages backends we should be better able to handle the
* cancellation of the async task in a more uniform manner.
*/
if (!pages && !i915_gem_object_needs_async_cancel(obj))
pages = ERR_PTR(-EINVAL);
if (!IS_ERR(pages))
obj->ops->put_pages(obj, pages);
err = 0;
unlock:
mutex_unlock(&obj->mm.lock);
return err;
}
/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
enum i915_map_type type)
{
unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
struct sg_table *sgt = obj->mm.pages;
struct sgt_iter sgt_iter;
struct page *page;
struct page *stack_pages[32];
struct page **pages = stack_pages;
unsigned long i = 0;
pgprot_t pgprot;
void *addr;
/* A single page can always be kmapped */
if (n_pages == 1 && type == I915_MAP_WB)
return kmap(sg_page(sgt->sgl));
if (n_pages > ARRAY_SIZE(stack_pages)) {
/* Too big for stack -- allocate temporary array instead */
pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
if (!pages)
return NULL;
}
for_each_sgt_page(page, sgt_iter, sgt)
pages[i++] = page;
/* Check that we have the expected number of pages */
GEM_BUG_ON(i != n_pages);
switch (type) {
default:
MISSING_CASE(type);
/* fallthrough - to use PAGE_KERNEL anyway */
case I915_MAP_WB:
pgprot = PAGE_KERNEL;
break;
case I915_MAP_WC:
pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
break;
}
addr = vmap(pages, n_pages, 0, pgprot);
if (pages != stack_pages)
kvfree(pages);
return addr;
}
/* get, pin, and map the pages of the object into kernel space */
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
enum i915_map_type type)
{
enum i915_map_type has_type;
bool pinned;
void *ptr;
int err;
if (unlikely(!i915_gem_object_has_struct_page(obj)))
return ERR_PTR(-ENXIO);
err = mutex_lock_interruptible(&obj->mm.lock);
if (err)
return ERR_PTR(err);
pinned = !(type & I915_MAP_OVERRIDE);
type &= ~I915_MAP_OVERRIDE;
if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
if (unlikely(!i915_gem_object_has_pages(obj))) {
GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
err = ____i915_gem_object_get_pages(obj);
if (err)
goto err_unlock;
smp_mb__before_atomic();
}
atomic_inc(&obj->mm.pages_pin_count);
pinned = false;
}
GEM_BUG_ON(!i915_gem_object_has_pages(obj));
ptr = page_unpack_bits(obj->mm.mapping, &has_type);
if (ptr && has_type != type) {
if (pinned) {
err = -EBUSY;
goto err_unpin;
}
if (is_vmalloc_addr(ptr))
vunmap(ptr);
else
kunmap(kmap_to_page(ptr));
ptr = obj->mm.mapping = NULL;
}
if (!ptr) {
ptr = i915_gem_object_map(obj, type);
if (!ptr) {
err = -ENOMEM;
goto err_unpin;
}
obj->mm.mapping = page_pack_bits(ptr, type);
}
out_unlock:
mutex_unlock(&obj->mm.lock);
return ptr;
err_unpin:
atomic_dec(&obj->mm.pages_pin_count);
err_unlock:
ptr = ERR_PTR(err);
goto out_unlock;
}
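/*
 * Flush the CPU cache over [offset, offset + size) of a mapping obtained
 * with i915_gem_object_pin_map(), so the GPU sees the CPU writes. The
 * flush is skipped when the object is already cache coherent for writes
 * or when the mapping is write-combining.
 *
 * Typical usage (sketch, for illustration only; error handling omitted):
 *
 *	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	memcpy(vaddr, data, len);
 *	__i915_gem_object_flush_map(obj, 0, len);
 *	i915_gem_object_unpin_map(obj);
 */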
void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
unsigned long offset,
unsigned long size)
{
enum i915_map_type has_type;
void *ptr;
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
offset, size, obj->base.size));
obj->mm.dirty = true;
if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
return;
ptr = page_unpack_bits(obj->mm.mapping, &has_type);
if (has_type == I915_MAP_WC)
return;
drm_clflush_virt_range(ptr + offset, size);
if (size == obj->base.size) {
obj->write_domain &= ~I915_GEM_DOMAIN_CPU;
obj->cache_dirty = false;
}
}
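/*
 * Find the scatterlist entry that covers page index @n of the object and
 * report, via @offset, how many pages into that entry @n lies. The pages
 * must be pinned by the caller for the result to remain valid.
 */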
struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
unsigned int n,
unsigned int *offset)
{
struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
struct scatterlist *sg;
unsigned int idx, count;
might_sleep();
GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
/* As we iterate forward through the sg, we record each entry in a
* radixtree for quick repeated (backwards) lookups. If we have seen
* this index previously, we will have an entry for it.
*
* Initial lookup is O(N), but this is amortized to O(1) for
* sequential page access (where each new request is consecutive
* to the previous one). Repeated lookups are O(lg(obj->base.size)),
* i.e. O(1) with a large constant!
*/
if (n < READ_ONCE(iter->sg_idx))
goto lookup;
mutex_lock(&iter->lock);
/* We prefer to reuse the last sg so that repeated lookup of this
* (or the subsequent) sg are fast - comparing against the last
* sg is faster than going through the radixtree.
*/
sg = iter->sg_pos;
idx = iter->sg_idx;
count = __sg_page_count(sg);
while (idx + count <= n) {
void *entry;
unsigned long i;
int ret;
/* If we cannot allocate and insert this entry, or the
* individual pages from this range, cancel updating the
* sg_idx so that on this lookup we are forced to linearly
* scan onwards, but on future lookups we will try the
* insertion again (in which case we need to be careful of
* the error return reporting that we have already inserted
* this index).
*/
ret = radix_tree_insert(&iter->radix, idx, sg);
if (ret && ret != -EEXIST)
goto scan;
entry = xa_mk_value(idx);
for (i = 1; i < count; i++) {
ret = radix_tree_insert(&iter->radix, idx + i, entry);
if (ret && ret != -EEXIST)
goto scan;
}
idx += count;
sg = ____sg_next(sg);
count = __sg_page_count(sg);
}
scan:
iter->sg_pos = sg;
iter->sg_idx = idx;
mutex_unlock(&iter->lock);
if (unlikely(n < idx)) /* insertion completed by another thread */
goto lookup;
/* In case we failed to insert the entry into the radixtree, we need
* to look beyond the current sg.
*/
while (idx + count <= n) {
idx += count;
sg = ____sg_next(sg);
count = __sg_page_count(sg);
}
*offset = n - idx;
return sg;
lookup:
rcu_read_lock();
sg = radix_tree_lookup(&iter->radix, n);
GEM_BUG_ON(!sg);
/* If this index is in the middle of multi-page sg entry,
* the radix tree will contain a value entry that points
* to the start of that range. We will return the pointer to
* the base page and the offset of this page within the
* sg entry's range.
*/
*offset = 0;
if (unlikely(xa_is_value(sg))) {
unsigned long base = xa_to_value(sg);
sg = radix_tree_lookup(&iter->radix, base);
GEM_BUG_ON(!sg);
*offset = n - base;
}
rcu_read_unlock();
return sg;
}
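/* Return the struct page backing page index @n; the pages must be pinned. */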
struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
{
struct scatterlist *sg;
unsigned int offset;
GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
sg = i915_gem_object_get_sg(obj, n, &offset);
return nth_page(sg_page(sg), offset);
}
/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
unsigned int n)
{
struct page *page;
page = i915_gem_object_get_page(obj, n);
if (!obj->mm.dirty)
set_page_dirty(page);
return page;
}
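/*
 * Return the DMA address of page index @n and, if @len is provided, the
 * number of bytes from that point to the end of its scatterlist entry.
 */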
dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
unsigned long n,
unsigned int *len)
{
struct scatterlist *sg;
unsigned int offset;
sg = i915_gem_object_get_sg(obj, n, &offset);
if (len)
*len = sg_dma_len(sg) - (offset << PAGE_SHIFT);
return sg_dma_address(sg) + (offset << PAGE_SHIFT);
}
dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
unsigned long n)
{
return i915_gem_object_get_dma_address_len(obj, n, NULL);
}