linux-stable/drivers/gpu/drm/i915/gt/shmem_utils.c
Chris Wilson be1cb55a07 drm/i915/gt: Keep a no-frills swappable copy of the default context state
We need to keep the default context state around to instantiate new
contexts (aka golden rendercontext), and we also keep it pinned while
the engine is active so that we can quickly reset a hanging context.
However, the default contexts are large enough to merit keeping in
swappable memory as opposed to kernel memory, so we store them inside
shmemfs. Currently, we use the normal GEM objects to create the default
context image, but we can throw away all but the shmemfs file.

This greatly simplifies the tricky power management code which wants to
run underneath the normal GT locking, and we definitely do not want to
use any high level objects that may appear to recurse back into the GT.
Perhaps the primary advantage of the complex GEM object is that we
aggressively cache the mapping, whereas here we recreate the vm_area
every time we unpark. At worst, we can add a lightweight cache, but
first we should find a microbenchmark that is impacted.

Having started to create some utility functions to make working with
shmemfs objects easier, we can start putting them to wider use, where
GEM objects are overkill, such as storing persistent error state.
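
As a rough sketch of the intended flow (illustrative only; the buffer
and names here are hypothetical, not part of this patch):

	struct file *file;
	void *vaddr;

	file = shmem_create_from_data("", state, state_size);
	if (IS_ERR(file))
		return PTR_ERR(file);

	/* Pin and map while the engine is active; pages are unevictable */
	vaddr = shmem_pin_map(file);
	if (!vaddr) {
		fput(file);
		return -ENOMEM;
	}

	/* ... consume vaddr, e.g. to initialise a new context image ... */

	shmem_unpin_map(file, vaddr); /* pages may be swapped out again */
	fput(file);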

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: Ramalingam C <ramalingam.c@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200429172429.6054-1-chris@chris-wilson.co.uk
2020-04-29 19:02:37 +01:00

// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>

#include "gem/i915_gem_object.h"
#include "shmem_utils.h"
struct file *shmem_create_from_data(const char *name, void *data, size_t len)
{
	struct file *file;
	int err;

	file = shmem_file_setup(name, PAGE_ALIGN(len), VM_NORESERVE);
	if (IS_ERR(file))
		return file;

	err = shmem_write(file, 0, data, len);
	if (err) {
		fput(file);
		return ERR_PTR(err);
	}

	return file;
}
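
/*
 * Extract a standalone shmemfs file from a GEM object. If the object is
 * already shmemfs-backed, reuse its file (taking an extra reference);
 * otherwise copy the object's contents into a fresh shmemfs file.
 */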
struct file *shmem_create_from_object(struct drm_i915_gem_object *obj)
{
	struct file *file;
	void *ptr;

	if (obj->ops == &i915_gem_shmem_ops) {
		file = obj->base.filp;
		atomic_long_inc(&file->f_count);
		return file;
	}

	ptr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(ptr))
		return ERR_CAST(ptr);

	file = shmem_create_from_data("", ptr, obj->base.size);
	i915_gem_object_unpin_map(obj);

	return file;
}
static size_t shmem_npte(struct file *file)
{
	return file->f_mapping->host->i_size >> PAGE_SHIFT;
}
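
/*
 * Undo shmem_pin_map(): tear down the kernel mapping and release both the
 * lookup reference taken here and the reference held since pinning.
 */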
static void __shmem_unpin_map(struct file *file, void *ptr, size_t n_pte)
{
	unsigned long pfn;

	vunmap(ptr);

	for (pfn = 0; pfn < n_pte; pfn++) {
		struct page *page;

		page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
						   GFP_KERNEL);
		if (!WARN_ON(IS_ERR(page))) {
			/* One ref for this lookup, one held since pinning */
			put_page(page);
			put_page(page);
		}
	}
}
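
/*
 * Pin every backing page of @file and map them into a single contiguous
 * kernel address range. The mapping is marked unevictable so the pages
 * cannot be swapped out until shmem_unpin_map().
 */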
void *shmem_pin_map(struct file *file)
{
	const size_t n_pte = shmem_npte(file);
	pte_t *stack[32], **ptes, **mem;
	struct vm_struct *area;
	unsigned long pfn;

	mem = stack;
	if (n_pte > ARRAY_SIZE(stack)) {
		mem = kvmalloc_array(n_pte, sizeof(*mem), GFP_KERNEL);
		if (!mem)
			return NULL;
	}

	area = alloc_vm_area(n_pte << PAGE_SHIFT, mem);
	if (!area) {
		if (mem != stack)
			kvfree(mem);
		return NULL;
	}

	ptes = mem;
	for (pfn = 0; pfn < n_pte; pfn++) {
		struct page *page;

		page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
						   GFP_KERNEL);
		if (IS_ERR(page))
			goto err_page;

		**ptes++ = mk_pte(page, PAGE_KERNEL);
	}

	if (mem != stack)
		kvfree(mem);

	mapping_set_unevictable(file->f_mapping);
	return area->addr;

err_page:
	if (mem != stack)
		kvfree(mem);

	__shmem_unpin_map(file, area->addr, pfn);
	return NULL;
}
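
/* Release a mapping created by shmem_pin_map(), making the pages swappable. */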
void shmem_unpin_map(struct file *file, void *ptr)
{
	mapping_clear_unevictable(file->f_mapping);
	__shmem_unpin_map(file, ptr, shmem_npte(file));
}
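
/*
 * Copy @len bytes between @ptr and the file contents starting at @off,
 * one page at a time, faulting each page in from shmemfs (or swap) as
 * needed.
 */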
static int __shmem_rw(struct file *file, loff_t off,
		      void *ptr, size_t len,
		      bool write)
{
	unsigned long pfn;

	for (pfn = off >> PAGE_SHIFT; len; pfn++) {
		unsigned int this =
			min_t(size_t, PAGE_SIZE - offset_in_page(off), len);
		struct page *page;
		void *vaddr;

		page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
						   GFP_KERNEL);
		if (IS_ERR(page))
			return PTR_ERR(page);

		vaddr = kmap(page);
		if (write) {
			memcpy(vaddr + offset_in_page(off), ptr, this);
			/*
			 * Mark the page dirty so that a clean swapped-in
			 * page is not reclaimed before the new contents
			 * are written back to swap.
			 */
			set_page_dirty(page);
		} else {
			memcpy(ptr, vaddr + offset_in_page(off), this);
		}
		kunmap(page);
		put_page(page);

		len -= this;
		ptr += this;
		off = 0;
	}

	return 0;
}
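
/* Simple read/write wrappers around __shmem_rw(). */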
int shmem_read(struct file *file, loff_t off, void *dst, size_t len)
{
	return __shmem_rw(file, off, dst, len, false);
}

int shmem_write(struct file *file, loff_t off, void *src, size_t len)
{
	return __shmem_rw(file, off, src, len, true);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "st_shmem_utils.c"
#endif