media: atomisp: remove dynamic and reserved pool code

There are no callers of this code at the moment; and looking at the atomisp
memory-management code, if anything we want to make it simpler and
not re-introduce use of these pools, so remove the pool code.

Link: https://lore.kernel.org/linux-media/20220615205037.16549-11-hdegoede@redhat.com
Reviewed-by: Andy Shevchenko <andy.shevchenko@gmail.com>
Signed-off-by: Hans de Goede <hdegoede@redhat.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@kernel.org>
This commit is contained in:
Hans de Goede 2022-06-15 21:50:07 +01:00 committed by Mauro Carvalho Chehab
parent ad4c63c3ba
commit b50b217fe2
3 changed files with 0 additions and 489 deletions

View file

@ -45,9 +45,7 @@ atomisp-objs += \
pci/camera/pipe/src/pipe_util.o \
pci/camera/util/src/util.o \
pci/hmm/hmm_bo.o \
pci/hmm/hmm_dynamic_pool.o \
pci/hmm/hmm.o \
pci/hmm/hmm_reserved_pool.o \
pci/ia_css_device_access.o \
pci/ia_css_isp_configs.o \
pci/ia_css_isp_states.o \

View file

@ -1,234 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Medifield PNW Camera Imaging ISP subsystem.
*
* Copyright (c) 2010 Intel Corporation. All Rights Reserved.
*
* Copyright (c) 2010 Silicon Hive www.siliconhive.com.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
*/
/*
* This file contains functions for dynamic memory pool management
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <asm/set_memory.h>
#include "atomisp_internal.h"
#include "hmm/hmm_pool.h"
/*
* dynamic memory pool ops.
*/
/*
 * get_pages_from_dynamic_pool() - pop up to @size pages from the dynamic pool.
 * @pool:     opaque handle; actually a struct hmm_dynamic_pool_info *
 * @page_obj: output array of at least @size entries; filled with pages taken
 *            from the pool, each tagged HMM_PAGE_TYPE_DYNAMIC
 * @size:     number of pages requested
 * @cached:   unused by this implementation
 *
 * Returns the number of pages handed out, which may be less than @size if
 * the pool runs empty first.
 */
static unsigned int get_pages_from_dynamic_pool(void *pool,
		struct hmm_page_object *page_obj,
		unsigned int size, bool cached)
{
	struct hmm_page *hmm_page;
	unsigned long flags;
	unsigned int i = 0;
	struct hmm_dynamic_pool_info *dypool_info = pool;

	if (!dypool_info)
		return 0;

	spin_lock_irqsave(&dypool_info->list_lock, flags);
	if (dypool_info->initialized) {
		while (!list_empty(&dypool_info->pages_list)) {
			hmm_page = list_entry(dypool_info->pages_list.next,
					      struct hmm_page, list);
			/* Unlink and account under the lock ... */
			list_del(&hmm_page->list);
			dypool_info->pgnr--;
			/*
			 * ... then drop it before filling page_obj and freeing
			 * the tracker.  NOTE(review): presumably done to keep
			 * the IRQ-off window short — confirm intent.
			 */
			spin_unlock_irqrestore(&dypool_info->list_lock, flags);

			page_obj[i].page = hmm_page->page;
			page_obj[i++].type = HMM_PAGE_TYPE_DYNAMIC;

			kmem_cache_free(dypool_info->pgptr_cache, hmm_page);

			if (i == size)
				return i;

			/* Re-take the lock before re-checking the list. */
			spin_lock_irqsave(&dypool_info->list_lock, flags);
		}
	}
	spin_unlock_irqrestore(&dypool_info->list_lock, flags);
	return i;
}
/*
 * Restore write-back caching on @page and release it to the system,
 * updating the global statistics.  Shared by the "pool full" and
 * "tracker allocation failed" paths, which were duplicated before.
 */
static void hmm_dynamic_pool_free_page_to_system(struct page *page)
{
	int ret;

	ret = set_pages_wb(page, 1);
	if (ret)
		dev_err(atomisp_dev,
			"set page to WB err ...ret=%d\n", ret);
	/*
	W/A: set_pages_wb seldom return value = -EFAULT
	indicate that address of page is not in valid
	range(0xffff880000000000~0xffffc7ffffffffff)
	then, _free_pages would panic; Do not know why page
	address be valid, it maybe memory corruption by lowmemory
	*/
	if (!ret) {
		__free_pages(page, 0);
		hmm_mem_stat.sys_size--;
	}
}

/*
 * free_pages_to_dynamic_pool() - give one page back to the dynamic pool.
 * @pool:     opaque handle; actually a struct hmm_dynamic_pool_info *
 * @page_obj: descriptor of the page being released
 *
 * Reserved-pool pages are ignored.  If the pool already holds pool_size
 * pages, or no tracker struct can be allocated, the page goes straight
 * back to the system instead of being cached.
 *
 * Fix vs. original: pgnr was compared against pool_size without holding
 * list_lock, racing with concurrent get/free callers; the count is now
 * snapshotted under the lock.
 */
static void free_pages_to_dynamic_pool(void *pool,
				       struct hmm_page_object *page_obj)
{
	struct hmm_page *hmm_page;
	unsigned long flags;
	unsigned int pgnr;
	struct hmm_dynamic_pool_info *dypool_info = pool;

	if (!dypool_info)
		return;

	spin_lock_irqsave(&dypool_info->list_lock, flags);
	if (!dypool_info->initialized) {
		spin_unlock_irqrestore(&dypool_info->list_lock, flags);
		return;
	}
	/* Snapshot the page count while the lock is held. */
	pgnr = dypool_info->pgnr;
	spin_unlock_irqrestore(&dypool_info->list_lock, flags);

	/* Reserved pages belong to the reserved pool, not to us. */
	if (page_obj->type == HMM_PAGE_TYPE_RESERVED)
		return;

	if (pgnr >= dypool_info->pool_size) {
		/* Pool is full: free page directly back to system. */
		hmm_dynamic_pool_free_page_to_system(page_obj->page);
		return;
	}

	hmm_page = kmem_cache_zalloc(dypool_info->pgptr_cache, GFP_KERNEL);
	if (!hmm_page) {
		/* No tracker available: free the page directly instead. */
		hmm_dynamic_pool_free_page_to_system(page_obj->page);
		return;
	}

	hmm_page->page = page_obj->page;

	/*
	 * add to pages_list of pages_pool
	 */
	spin_lock_irqsave(&dypool_info->list_lock, flags);
	list_add_tail(&hmm_page->list, &dypool_info->pages_list);
	dypool_info->pgnr++;
	spin_unlock_irqrestore(&dypool_info->list_lock, flags);
	hmm_mem_stat.dyc_size++;
}
/*
 * hmm_dynamic_pool_init() - create the dynamic page pool.
 * @pool:      out parameter; receives the new hmm_dynamic_pool_info
 * @pool_size: maximum number of pages the pool may cache; 0 disables the
 *             pool and returns 0 without allocating anything
 *
 * Return: 0 on success (or when disabled), -ENOMEM on allocation failure.
 */
static int hmm_dynamic_pool_init(void **pool, unsigned int pool_size)
{
	struct hmm_dynamic_pool_info *dypool_info;

	if (pool_size == 0)
		return 0;

	/* sizeof(*ptr) idiom: allocation size stays tied to the variable. */
	dypool_info = kmalloc(sizeof(*dypool_info), GFP_KERNEL);
	if (unlikely(!dypool_info))
		return -ENOMEM;

	/* Slab cache for the per-page struct hmm_page trackers. */
	dypool_info->pgptr_cache = kmem_cache_create("pgptr_cache",
						     sizeof(struct hmm_page), 0,
						     SLAB_HWCACHE_ALIGN, NULL);
	if (!dypool_info->pgptr_cache) {
		kfree(dypool_info);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&dypool_info->pages_list);
	spin_lock_init(&dypool_info->list_lock);
	dypool_info->initialized = true;
	dypool_info->pool_size = pool_size;
	dypool_info->pgnr = 0;

	*pool = dypool_info;
	return 0;
}
/*
 * hmm_dynamic_pool_exit() - tear down the dynamic pool and free its pages.
 * @pool: in/out; *pool points at the hmm_dynamic_pool_info and is set to
 *        NULL on return.
 *
 * Marks the pool uninitialized under the lock (so concurrent get/free
 * callers bail out), then drains the page list, dropping the lock around
 * each set_pages_wb()/__free_pages() pair because set_pages_wb() can sleep.
 */
static void hmm_dynamic_pool_exit(void **pool)
{
	struct hmm_dynamic_pool_info *dypool_info = *pool;
	struct hmm_page *hmm_page;
	unsigned long flags;
	int ret;

	if (!dypool_info)
		return;

	spin_lock_irqsave(&dypool_info->list_lock, flags);
	if (!dypool_info->initialized) {
		spin_unlock_irqrestore(&dypool_info->list_lock, flags);
		return;
	}
	/* Fence off other callers before we start dismantling the list. */
	dypool_info->initialized = false;

	while (!list_empty(&dypool_info->pages_list)) {
		hmm_page = list_entry(dypool_info->pages_list.next,
				      struct hmm_page, list);

		list_del(&hmm_page->list);
		spin_unlock_irqrestore(&dypool_info->list_lock, flags);

		/* can cause thread sleep, so cannot be put into spin_lock */
		ret = set_pages_wb(hmm_page->page, 1);
		if (ret)
			dev_err(atomisp_dev,
				"set page to WB err...ret=%d\n", ret);
		/* Only free when the caching attribute was restored; see the
		 * -EFAULT workaround note elsewhere in this file. */
		if (!ret) {
			__free_pages(hmm_page->page, 0);
			hmm_mem_stat.dyc_size--;
			hmm_mem_stat.sys_size--;
		}
		kmem_cache_free(dypool_info->pgptr_cache, hmm_page);
		spin_lock_irqsave(&dypool_info->list_lock, flags);
	}

	spin_unlock_irqrestore(&dypool_info->list_lock, flags);

	kmem_cache_destroy(dypool_info->pgptr_cache);
	kfree(dypool_info);
	*pool = NULL;
}
/* Report whether the dynamic pool behind @pool has been initialized;
 * a NULL pool counts as not initialized. */
static int hmm_dynamic_pool_inited(void *pool)
{
	struct hmm_dynamic_pool_info *info = pool;

	return info ? info->initialized : 0;
}
/* Dynamic-pool implementation of the generic hmm_pool_ops interface. */
struct hmm_pool_ops dynamic_pops = {
	.pool_init		= hmm_dynamic_pool_init,
	.pool_exit		= hmm_dynamic_pool_exit,
	.pool_alloc_pages	= get_pages_from_dynamic_pool,
	.pool_free_pages	= free_pages_to_dynamic_pool,
	.pool_inited		= hmm_dynamic_pool_inited,
};

View file

@ -1,253 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Medifield PNW Camera Imaging ISP subsystem.
*
* Copyright (c) 2010 Intel Corporation. All Rights Reserved.
*
* Copyright (c) 2010 Silicon Hive www.siliconhive.com.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
*/
/*
* This file contains functions for reserved memory pool management
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <asm/set_memory.h>
#include "atomisp_internal.h"
#include "hmm/hmm_pool.h"
/*
* reserved memory pool ops.
*/
/*
 * get_pages_from_reserved_pool() - pop up to @size pages off the reserved
 * pool's stack of pre-allocated pages.
 * @pool:     opaque handle; actually a struct hmm_reserved_pool_info *
 * @page_obj: output array; entries are tagged HMM_PAGE_TYPE_RESERVED
 * @size:     number of pages requested
 * @cached:   unused by this implementation
 *
 * Returns the number of pages handed out (0 if the pool is missing,
 * uninitialized, or empty).
 */
static unsigned int get_pages_from_reserved_pool(void *pool,
		struct hmm_page_object *page_obj,
		unsigned int size, bool cached)
{
	struct hmm_reserved_pool_info *repool_info = pool;
	unsigned long flags;
	unsigned int done = 0;

	if (!repool_info)
		return 0;

	spin_lock_irqsave(&repool_info->list_lock, flags);
	/* pages[0..index-1] are free; hand them out top-down. */
	while (repool_info->initialized && repool_info->index > 0) {
		repool_info->index--;
		page_obj[done].page = repool_info->pages[repool_info->index];
		page_obj[done].type = HMM_PAGE_TYPE_RESERVED;
		done++;
		if (done == size)
			break;
	}
	spin_unlock_irqrestore(&repool_info->list_lock, flags);

	return done;
}
/*
 * free_pages_to_reserved_pool() - push one page back onto the reserved
 * pool's stack.  Pages that did not come from this pool (wrong type),
 * or that would overflow pages[] (index == pgnr), are silently ignored.
 */
static void free_pages_to_reserved_pool(void *pool,
		struct hmm_page_object *page_obj)
{
	struct hmm_reserved_pool_info *repool_info = pool;
	unsigned long flags;

	if (!repool_info)
		return;

	spin_lock_irqsave(&repool_info->list_lock, flags);

	if (page_obj->type == HMM_PAGE_TYPE_RESERVED &&
	    repool_info->initialized &&
	    repool_info->index < repool_info->pgnr)
		repool_info->pages[repool_info->index++] = page_obj->page;

	spin_unlock_irqrestore(&repool_info->list_lock, flags);
}
/*
 * hmm_reserved_pool_setup() - allocate the bookkeeping for a reserved pool.
 * @repool_info: out parameter; receives the new pool descriptor
 * @pool_size:   capacity of the pages[] array, in pages
 *
 * Return: 0 on success, -ENOMEM on allocation failure (nothing leaked).
 */
static int hmm_reserved_pool_setup(struct hmm_reserved_pool_info **repool_info,
				   unsigned int pool_size)
{
	struct hmm_reserved_pool_info *pool_info;

	pool_info = kmalloc(sizeof(*pool_info), GFP_KERNEL);
	if (unlikely(!pool_info))
		return -ENOMEM;

	/*
	 * kmalloc_array() checks pool_size * sizeof(ptr) for overflow,
	 * unlike the open-coded multiplication it replaces.
	 */
	pool_info->pages = kmalloc_array(pool_size, sizeof(*pool_info->pages),
					 GFP_KERNEL);
	if (unlikely(!pool_info->pages)) {
		kfree(pool_info);
		return -ENOMEM;
	}

	pool_info->index = 0;
	pool_info->pgnr = 0;
	spin_lock_init(&pool_info->list_lock);
	pool_info->initialized = true;

	*repool_info = pool_info;
	return 0;
}
/*
 * hmm_reserved_pool_init() - pre-allocate @pool_size uncached pages.
 * @pool:      out parameter; receives the new hmm_reserved_pool_info
 * @pool_size: number of pages to reserve; 0 means "no pool", returns 0
 *
 * Grabs pages in the largest power-of-two blocks that still fit the
 * remaining count, marks each block uncached, and records the individual
 * pages in pages[].  On allocation failure it retries at a smaller order,
 * giving up after ALLOC_PAGE_FAIL_NUM consecutive order-0 failures.
 *
 * NOTE(review): on partial failure (goto end) this still returns 0 and
 * logs "init successfully" — callers only see the reduced pgnr.
 * NOTE(review): order starts at MAX_ORDER, but alloc_pages() historically
 * requires order < MAX_ORDER, so the very first attempt for large pools
 * presumably always fails — confirm against the allocator contract.
 */
static int hmm_reserved_pool_init(void **pool, unsigned int pool_size)
{
	int ret;
	unsigned int blk_pgnr;
	unsigned int pgnr = pool_size;
	unsigned int order = 0;
	unsigned int i = 0;
	int fail_number = 0;
	struct page *pages;
	int j;
	struct hmm_reserved_pool_info *repool_info;

	if (pool_size == 0)
		return 0;

	ret = hmm_reserved_pool_setup(&repool_info, pool_size);
	if (ret) {
		dev_err(atomisp_dev, "hmm_reserved_pool_setup failed.\n");
		return ret;
	}

	pgnr = pool_size;

	i = 0;
	order = MAX_ORDER;

	while (pgnr) {
		/* Shrink the block until it fits what is still needed. */
		blk_pgnr = 1U << order;
		while (blk_pgnr > pgnr) {
			order--;
			blk_pgnr >>= 1U;
		}
		BUG_ON(order > MAX_ORDER);

		pages = alloc_pages(GFP_KERNEL | __GFP_NOWARN, order);
		if (unlikely(!pages)) {
			if (order == 0) {
				fail_number++;
				dev_err(atomisp_dev, "%s: alloc_pages failed: %d\n",
					__func__, fail_number);
				/* if fail five times, will goto end */
				/* FIXME: whether is the mechanism is ok? */
				if (fail_number == ALLOC_PAGE_FAIL_NUM)
					goto end;
			} else {
				/* Retry with a smaller block next round. */
				order--;
			}
		} else {
			blk_pgnr = 1U << order;

			/* Mark the whole block uncached before recording it. */
			ret = set_pages_uc(pages, blk_pgnr);
			if (ret) {
				dev_err(atomisp_dev,
					"set pages uncached failed\n");
				__free_pages(pages, order);
				goto end;
			}

			/* Record each page of the block individually. */
			for (j = 0; j < blk_pgnr; j++)
				repool_info->pages[i++] = pages + j;

			repool_info->index += blk_pgnr;
			repool_info->pgnr += blk_pgnr;

			pgnr -= blk_pgnr;
			fail_number = 0;
		}
	}

end:
	repool_info->initialized = true;

	*pool = repool_info;

	dev_info(atomisp_dev,
		 "hmm_reserved_pool init successfully,hmm_reserved_pool is with %d pages.\n",
		 repool_info->pgnr);
	return 0;
}
/*
 * hmm_reserved_pool_exit() - free every reserved page and the pool itself.
 * @pool: in/out; *pool points at the hmm_reserved_pool_info and is set to
 *        NULL on return.
 *
 * Resets the bookkeeping and clears 'initialized' under the lock, then
 * restores write-back caching on each page and frees it outside the lock
 * (set_pages_wb() can sleep — see the identical note in the dynamic pool).
 * NOTE(review): pages[] is read after the lock is dropped; this relies on
 * initialized=false keeping all other callers out — confirm no concurrent
 * re-init can run.
 */
static void hmm_reserved_pool_exit(void **pool)
{
	unsigned long flags;
	int i, ret;
	unsigned int pgnr;
	struct hmm_reserved_pool_info *repool_info = *pool;

	if (!repool_info)
		return;

	spin_lock_irqsave(&repool_info->list_lock, flags);
	if (!repool_info->initialized) {
		spin_unlock_irqrestore(&repool_info->list_lock, flags);
		return;
	}
	/* Snapshot the page count and mark the pool dead before unlocking. */
	pgnr = repool_info->pgnr;
	repool_info->index = 0;
	repool_info->pgnr = 0;
	repool_info->initialized = false;
	spin_unlock_irqrestore(&repool_info->list_lock, flags);

	for (i = 0; i < pgnr; i++) {
		ret = set_pages_wb(repool_info->pages[i], 1);
		if (ret)
			dev_err(atomisp_dev,
				"set page to WB err...ret=%d\n", ret);
		/*
		W/A: set_pages_wb seldom return value = -EFAULT
		indicate that address of page is not in valid
		range(0xffff880000000000~0xffffc7ffffffffff)
		then, _free_pages would panic; Do not know why
		page address be valid, it maybe memory corruption by lowmemory
		*/
		if (!ret)
			__free_pages(repool_info->pages[i], 0);
	}

	kfree(repool_info->pages);
	kfree(repool_info);
	*pool = NULL;
}
/* Report whether the reserved pool behind @pool has been initialized;
 * a NULL pool counts as not initialized. */
static int hmm_reserved_pool_inited(void *pool)
{
	struct hmm_reserved_pool_info *info = pool;

	return info ? info->initialized : 0;
}
/* Reserved-pool implementation of the generic hmm_pool_ops interface. */
struct hmm_pool_ops reserved_pops = {
	.pool_init		= hmm_reserved_pool_init,
	.pool_exit		= hmm_reserved_pool_exit,
	.pool_alloc_pages	= get_pages_from_reserved_pool,
	.pool_free_pages	= free_pages_to_reserved_pool,
	.pool_inited		= hmm_reserved_pool_inited,
};