media: atomisp: remove hmm pool code

Since we never register any pools, this is all dead code;
remove it.

Link: https://lore.kernel.org/linux-media/20220615205037.16549-12-hdegoede@redhat.com
Reviewed-by: Andy Shevchenko <andy.shevchenko@gmail.com>
Signed-off-by: Hans de Goede <hdegoede@redhat.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@kernel.org>
Hans de Goede 2022-06-15 21:50:08 +01:00 committed by Mauro Carvalho Chehab
parent b50b217fe2
commit c35f36b7c1
5 changed files with 8 additions and 245 deletions

--- a/drivers/staging/media/atomisp/include/hmm/hmm.h
+++ b/drivers/staging/media/atomisp/include/hmm/hmm.h

@@ -26,7 +26,8 @@
 #include <linux/slab.h>
 #include <linux/mm.h>
 
-#include "hmm/hmm_pool.h"
+#include "hmm_common.h"
+#include "hmm/hmm_bo.h"
 #include "ia_css_types.h"
 
 #define mmgr_NULL ((ia_css_ptr)0)

--- a/drivers/staging/media/atomisp/include/hmm/hmm_bo.h
+++ b/drivers/staging/media/atomisp/include/hmm/hmm_bo.h

@@ -280,9 +280,6 @@ void hmm_bo_vunmap(struct hmm_buffer_object *bo);
 int hmm_bo_mmap(struct vm_area_struct *vma,
 		struct hmm_buffer_object *bo);
 
-extern struct hmm_pool	dynamic_pool;
-extern struct hmm_pool	reserved_pool;
-
 /*
  * find the buffer object by its virtual address vaddr.
  * return NULL if no such buffer object found.

--- a/drivers/staging/media/atomisp/include/hmm/hmm_pool.h
+++ /dev/null

@@ -1,116 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Support for Medifield PNW Camera Imaging ISP subsystem.
- *
- * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
- *
- * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- *
- */
-
-#ifndef __HMM_POOL_H__
-#define __HMM_POOL_H__
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/mutex.h>
-#include <linux/kref.h>
-#include "hmm_common.h"
-#include "hmm/hmm_bo.h"
-
-#define ALLOC_PAGE_FAIL_NUM	5
-
-enum hmm_pool_type {
-	HMM_POOL_TYPE_RESERVED,
-	HMM_POOL_TYPE_DYNAMIC,
-};
-
-/**
- * struct hmm_pool_ops - memory pool callbacks.
- *
- * @pool_init:		initialize the memory pool.
- * @pool_exit:		uninitialize the memory pool.
- * @pool_alloc_pages:	allocate pages from memory pool.
- * @pool_free_pages:	free pages to memory pool.
- * @pool_inited:	check whether memory pool is initialized.
- */
-struct hmm_pool_ops {
-	int (*pool_init)(void **pool, unsigned int pool_size);
-	void (*pool_exit)(void **pool);
-	unsigned int (*pool_alloc_pages)(void *pool,
-					 struct hmm_page_object *page_obj,
-					 unsigned int size, bool cached);
-	void (*pool_free_pages)(void *pool,
-				struct hmm_page_object *page_obj);
-	int (*pool_inited)(void *pool);
-};
-
-struct hmm_pool {
-	struct hmm_pool_ops	*pops;
-	void			*pool_info;
-};
-
-/**
- * struct hmm_reserved_pool_info - represents reserved pool private data.
- * @pages:		a array that store physical pages.
- *			The array is as reserved memory pool.
- * @index:		to indicate the first blank page number
- *			in reserved memory pool(pages array).
- * @pgnr:		the valid page amount in reserved memory
- *			pool.
- * @list_lock:		list lock is used to protect the operation
- *			to reserved memory pool.
- * @flag:		reserved memory pool state flag.
- */
-struct hmm_reserved_pool_info {
-	struct page		**pages;
-	unsigned int		index;
-	unsigned int		pgnr;
-	spinlock_t		list_lock;
-	bool			initialized;
-};
-
-/**
- * struct hmm_dynamic_pool_info - represents dynamic pool private data.
- * @pages_list:		a list that store physical pages.
- *			The pages list is as dynamic memory pool.
- * @list_lock:		list lock is used to protect the operation
- *			to dynamic memory pool.
- * @flag:		dynamic memory pool state flag.
- * @pgptr_cache:	struct kmem_cache, manages a cache.
- */
-struct hmm_dynamic_pool_info {
-	struct list_head	pages_list;
-	/* list lock is used to protect the free pages block lists */
-	spinlock_t		list_lock;
-	struct kmem_cache	*pgptr_cache;
-	bool			initialized;
-	unsigned int		pool_size;
-	unsigned int		pgnr;
-};
-
-struct hmm_page {
-	struct page		*page;
-	struct list_head	list;
-};
-
-extern struct hmm_pool_ops	reserved_pops;
-extern struct hmm_pool_ops	dynamic_pops;
-
-#endif

--- a/drivers/staging/media/atomisp/pci/hmm/hmm.c
+++ b/drivers/staging/media/atomisp/pci/hmm/hmm.c

@@ -28,7 +28,6 @@
 #include <linux/sysfs.h>
 
 #include "hmm/hmm.h"
-#include "hmm/hmm_pool.h"
 #include "hmm/hmm_bo.h"
 
 #include "atomisp_internal.h"
@@ -37,8 +36,6 @@
 #include "mmu/sh_mmu_mrfld.h"
 
 struct hmm_bo_device bo_device;
-struct hmm_pool	dynamic_pool;
-struct hmm_pool	reserved_pool;
 static ia_css_ptr dummy_ptr = mmgr_EXCEPTION;
 static bool hmm_initialized;
 struct _hmm_mem_stat hmm_mem_stat;
@@ -113,62 +110,13 @@ static ssize_t free_bo_show(struct device *dev, struct device_attribute *attr,
 	return bo_show(dev, attr, buf, &bo_device.entire_bo_list, false);
 }
 
-static ssize_t reserved_pool_show(struct device *dev,
-				  struct device_attribute *attr,
-				  char *buf)
-{
-	ssize_t ret = 0;
-
-	struct hmm_reserved_pool_info *pinfo = reserved_pool.pool_info;
-	unsigned long flags;
-
-	if (!pinfo || !pinfo->initialized)
-		return 0;
-
-	spin_lock_irqsave(&pinfo->list_lock, flags);
-	ret = scnprintf(buf, PAGE_SIZE, "%d out of %d pages available\n",
-			pinfo->index, pinfo->pgnr);
-	spin_unlock_irqrestore(&pinfo->list_lock, flags);
-
-	if (ret > 0)
-		ret++; /* Add trailing zero, not included by scnprintf */
-
-	return ret;
-};
-
-static ssize_t dynamic_pool_show(struct device *dev,
-				 struct device_attribute *attr,
-				 char *buf)
-{
-	ssize_t ret = 0;
-
-	struct hmm_dynamic_pool_info *pinfo = dynamic_pool.pool_info;
-	unsigned long flags;
-
-	if (!pinfo || !pinfo->initialized)
-		return 0;
-
-	spin_lock_irqsave(&pinfo->list_lock, flags);
-	ret = scnprintf(buf, PAGE_SIZE, "%d (max %d) pages available\n",
-			pinfo->pgnr, pinfo->pool_size);
-	spin_unlock_irqrestore(&pinfo->list_lock, flags);
-
-	if (ret > 0)
-		ret++; /* Add trailing zero, not included by scnprintf */
-
-	return ret;
-};
-
 static DEVICE_ATTR_RO(active_bo);
 static DEVICE_ATTR_RO(free_bo);
-static DEVICE_ATTR_RO(reserved_pool);
-static DEVICE_ATTR_RO(dynamic_pool);
 
 static struct attribute *sysfs_attrs_ctrl[] = {
 	&dev_attr_active_bo.attr,
 	&dev_attr_free_bo.attr,
-	&dev_attr_reserved_pool.attr,
-	&dev_attr_dynamic_pool.attr,
 	NULL
 };

--- a/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
+++ b/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c

@@ -42,7 +42,6 @@
 
 #include "atomisp_internal.h"
 #include "hmm/hmm_common.h"
-#include "hmm/hmm_pool.h"
 #include "hmm/hmm_bo.h"
 
 static unsigned int order_to_nr(unsigned int order)
@@ -627,8 +626,6 @@ struct hmm_buffer_object *hmm_bo_device_search_vmap_start(
 }
 
 static void free_private_bo_pages(struct hmm_buffer_object *bo,
-				  struct hmm_pool *dypool,
-				  struct hmm_pool *repool,
 				  int free_pgnr)
 {
 	int i, ret;
@@ -636,36 +633,9 @@ static void free_private_bo_pages(struct hmm_buffer_object *bo,
 	for (i = 0; i < free_pgnr; i++) {
 		switch (bo->page_obj[i].type) {
 		case HMM_PAGE_TYPE_RESERVED:
-			if (repool->pops
-			    && repool->pops->pool_free_pages) {
-				repool->pops->pool_free_pages(repool->pool_info,
-							      &bo->page_obj[i]);
-				hmm_mem_stat.res_cnt--;
-			}
 			break;
-		/*
-		 * HMM_PAGE_TYPE_GENERAL indicates that pages are from system
-		 * memory, so when free them, they should be put into dynamic
-		 * pool.
-		 */
-		case HMM_PAGE_TYPE_DYNAMIC:
-		case HMM_PAGE_TYPE_GENERAL:
-			if (dypool->pops
-			    && dypool->pops->pool_inited
-			    && dypool->pops->pool_inited(dypool->pool_info)) {
-				if (dypool->pops->pool_free_pages)
-					dypool->pops->pool_free_pages(
-					    dypool->pool_info,
-					    &bo->page_obj[i]);
-				break;
-			}
-			fallthrough;
-		/*
-		 * if dynamic memory pool doesn't exist, need to free
-		 * pages to system directly.
-		 */
 		default:
 			ret = set_pages_wb(bo->page_obj[i].page, 1);
 			if (ret)
@@ -693,9 +663,7 @@ static void free_private_bo_pages(struct hmm_buffer_object *bo,
 /*Allocate pages which will be used only by ISP*/
 static int alloc_private_pages(struct hmm_buffer_object *bo,
 			       int from_highmem,
-			       bool cached,
-			       struct hmm_pool *dypool,
-			       struct hmm_pool *repool)
+			       bool cached)
 {
 	int ret;
 	unsigned int pgnr, order, blk_pgnr, alloc_pgnr;
@@ -719,37 +687,6 @@ static int alloc_private_pages(struct hmm_buffer_object *bo,
 	i = 0;
 	alloc_pgnr = 0;
 
-	/*
-	 * get physical pages from dynamic pages pool.
-	 */
-	if (dypool->pops && dypool->pops->pool_alloc_pages) {
-		alloc_pgnr = dypool->pops->pool_alloc_pages(dypool->pool_info,
-							    bo->page_obj, pgnr,
-							    cached);
-		hmm_mem_stat.dyc_size -= alloc_pgnr;
-
-		if (alloc_pgnr == pgnr)
-			return 0;
-	}
-
-	pgnr -= alloc_pgnr;
-	i += alloc_pgnr;
-
-	/*
-	 * get physical pages from reserved pages pool for atomisp.
-	 */
-	if (repool->pops && repool->pops->pool_alloc_pages) {
-		alloc_pgnr = repool->pops->pool_alloc_pages(repool->pool_info,
-							    &bo->page_obj[i], pgnr,
-							    cached);
-		hmm_mem_stat.res_cnt += alloc_pgnr;
-		if (alloc_pgnr == pgnr)
-			return 0;
-	}
-
-	pgnr -= alloc_pgnr;
-	i += alloc_pgnr;
-
 	while (pgnr) {
 		order = nr_to_order_bottom(pgnr);
 		/*
@@ -841,19 +778,16 @@ static int alloc_private_pages(struct hmm_buffer_object *bo,
 	return 0;
 
 cleanup:
 	alloc_pgnr = i;
-	free_private_bo_pages(bo, dypool, repool, alloc_pgnr);
+	free_private_bo_pages(bo, alloc_pgnr);
 
 	kfree(bo->page_obj);
 
 	return -ENOMEM;
 }
 
-static void free_private_pages(struct hmm_buffer_object *bo,
-			       struct hmm_pool *dypool,
-			       struct hmm_pool *repool)
+static void free_private_pages(struct hmm_buffer_object *bo)
 {
-	free_private_bo_pages(bo, dypool, repool, bo->pgnr);
+	free_private_bo_pages(bo, bo->pgnr);
 
 	kfree(bo->page_obj);
 }
@@ -993,8 +927,7 @@ int hmm_bo_alloc_pages(struct hmm_buffer_object *bo,
 	 * add HMM_BO_USER type
 	 */
 	if (type == HMM_BO_PRIVATE) {
-		ret = alloc_private_pages(bo, from_highmem,
-					  cached, &dynamic_pool, &reserved_pool);
+		ret = alloc_private_pages(bo, from_highmem, cached);
 	} else if (type == HMM_BO_USER) {
 		ret = alloc_user_pages(bo, userptr, cached);
 	} else {
@@ -1038,7 +971,7 @@ void hmm_bo_free_pages(struct hmm_buffer_object *bo)
 	bo->status &= (~HMM_BO_PAGE_ALLOCED);
 
 	if (bo->type == HMM_BO_PRIVATE)
-		free_private_pages(bo, &dynamic_pool, &reserved_pool);
+		free_private_pages(bo);
 	else if (bo->type == HMM_BO_USER)
 		free_user_pages(bo, bo->pgnr);
 	else