commit 85cb928787
When the following test has been running for several hours, the problem occurs.
Server:
rds-stress -r 1.1.1.16 -D 1M
Client:
rds-stress -r 1.1.1.14 -s 1.1.1.16 -D 1M -T 30
The following output appears:
"
Starting up....
tsks  tx/s  rx/s  tx+rx K/s   mbi K/s   mbo K/s  tx us/c  rtt us  cpu %
   1     0     0       0.00      0.00      0.00     0.00    0.00  -1.00
   1     0     0       0.00      0.00      0.00     0.00    0.00  -1.00
   1     0     0       0.00      0.00      0.00     0.00    0.00  -1.00
   1     0     0       0.00      0.00      0.00     0.00    0.00  -1.00
"
From the vmcore, we can see that clean_list is NULL.
From the source code, rds_mr_flushd calls rds_ib_mr_pool_flush_worker,
which in turn calls
"
rds_ib_flush_mr_pool(pool, 0, NULL);
"
so in the function
"
int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
                         int free_all, struct rds_ib_mr **ibmr_ret)
"
ibmr_ret is NULL.
In the source code before this fix,
"
...
        list_to_llist_nodes(pool, &unmap_list, &clean_nodes, &clean_tail);
        if (ibmr_ret)
                *ibmr_ret = llist_entry(clean_nodes, struct rds_ib_mr, llnode);
        /* more than one entry in llist nodes */
        if (clean_nodes->next)
                llist_add_batch(clean_nodes->next, clean_tail, &pool->clean_list);
...
"
When ibmr_ret is NULL, the llist_entry() assignment is not executed, yet
clean_nodes->next rather than clean_nodes is added to clean_list. So the
first node, clean_nodes, is discarded and can never be used again.
The flush workqueue runs periodically, so more and more nodes are
discarded this way, until clean_list finally becomes empty and the
stall above occurs.
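The fix, as applied in rds_ib_flush_mr_pool() in the file below, hands the
first node to the caller only when ibmr_ret is set, then advances
clean_nodes past it, so every remaining node is returned to clean_list:
"
        list_to_llist_nodes(pool, &unmap_list, &clean_nodes, &clean_tail);
        if (ibmr_ret) {
                *ibmr_ret = llist_entry(clean_nodes, struct rds_ib_mr, llnode);
                clean_nodes = clean_nodes->next;
        }
        /* more than one entry in llist nodes */
        if (clean_nodes)
                llist_add_batch(clean_nodes, clean_tail,
                                &pool->clean_list);
"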
Fixes: 1bc144b625 ("net, rds, Replace xlist in net/rds/xlist.h with llist")
Signed-off-by: Zhu Yanjun <yanjun.zhu@oracle.com>
Acked-by: Santosh Shilimkar <santosh.shilimkar@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
651 lines, 17 KiB, C
/*
 * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/llist.h>

#include "rds_single_path.h"
#include "ib_mr.h"

struct workqueue_struct *rds_ib_mr_wq;

static DEFINE_PER_CPU(unsigned long, clean_list_grace);
#define CLEAN_LIST_BUSY_BIT 0

static struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
{
        struct rds_ib_device *rds_ibdev;
        struct rds_ib_ipaddr *i_ipaddr;

        rcu_read_lock();
        list_for_each_entry_rcu(rds_ibdev, &rds_ib_devices, list) {
                list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
                        if (i_ipaddr->ipaddr == ipaddr) {
                                refcount_inc(&rds_ibdev->refcount);
                                rcu_read_unlock();
                                return rds_ibdev;
                        }
                }
        }
        rcu_read_unlock();

        return NULL;
}

static int rds_ib_add_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
        struct rds_ib_ipaddr *i_ipaddr;

        i_ipaddr = kmalloc(sizeof *i_ipaddr, GFP_KERNEL);
        if (!i_ipaddr)
                return -ENOMEM;

        i_ipaddr->ipaddr = ipaddr;

        spin_lock_irq(&rds_ibdev->spinlock);
        list_add_tail_rcu(&i_ipaddr->list, &rds_ibdev->ipaddr_list);
        spin_unlock_irq(&rds_ibdev->spinlock);

        return 0;
}

static void rds_ib_remove_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
        struct rds_ib_ipaddr *i_ipaddr;
        struct rds_ib_ipaddr *to_free = NULL;

        spin_lock_irq(&rds_ibdev->spinlock);
        list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
                if (i_ipaddr->ipaddr == ipaddr) {
                        list_del_rcu(&i_ipaddr->list);
                        to_free = i_ipaddr;
                        break;
                }
        }
        spin_unlock_irq(&rds_ibdev->spinlock);

        if (to_free)
                kfree_rcu(to_free, rcu);
}

int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev,
                         struct in6_addr *ipaddr)
{
        struct rds_ib_device *rds_ibdev_old;

        rds_ibdev_old = rds_ib_get_device(ipaddr->s6_addr32[3]);
        if (!rds_ibdev_old)
                return rds_ib_add_ipaddr(rds_ibdev, ipaddr->s6_addr32[3]);

        if (rds_ibdev_old != rds_ibdev) {
                rds_ib_remove_ipaddr(rds_ibdev_old, ipaddr->s6_addr32[3]);
                rds_ib_dev_put(rds_ibdev_old);
                return rds_ib_add_ipaddr(rds_ibdev, ipaddr->s6_addr32[3]);
        }
        rds_ib_dev_put(rds_ibdev_old);

        return 0;
}

void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
        struct rds_ib_connection *ic = conn->c_transport_data;

        /* conn was previously on the nodev_conns_list */
        spin_lock_irq(&ib_nodev_conns_lock);
        BUG_ON(list_empty(&ib_nodev_conns));
        BUG_ON(list_empty(&ic->ib_node));
        list_del(&ic->ib_node);

        spin_lock(&rds_ibdev->spinlock);
        list_add_tail(&ic->ib_node, &rds_ibdev->conn_list);
        spin_unlock(&rds_ibdev->spinlock);
        spin_unlock_irq(&ib_nodev_conns_lock);

        ic->rds_ibdev = rds_ibdev;
        refcount_inc(&rds_ibdev->refcount);
}

void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
        struct rds_ib_connection *ic = conn->c_transport_data;

        /* place conn on nodev_conns_list */
        spin_lock(&ib_nodev_conns_lock);

        spin_lock_irq(&rds_ibdev->spinlock);
        BUG_ON(list_empty(&ic->ib_node));
        list_del(&ic->ib_node);
        spin_unlock_irq(&rds_ibdev->spinlock);

        list_add_tail(&ic->ib_node, &ib_nodev_conns);

        spin_unlock(&ib_nodev_conns_lock);

        ic->rds_ibdev = NULL;
        rds_ib_dev_put(rds_ibdev);
}

void rds_ib_destroy_nodev_conns(void)
{
        struct rds_ib_connection *ic, *_ic;
        LIST_HEAD(tmp_list);

        /* avoid calling conn_destroy with irqs off */
        spin_lock_irq(&ib_nodev_conns_lock);
        list_splice(&ib_nodev_conns, &tmp_list);
        spin_unlock_irq(&ib_nodev_conns_lock);

        list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node)
                rds_conn_destroy(ic->conn);
}

void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo)
{
        struct rds_ib_mr_pool *pool_1m = rds_ibdev->mr_1m_pool;

        iinfo->rdma_mr_max = pool_1m->max_items;
        iinfo->rdma_mr_size = pool_1m->fmr_attr.max_pages;
}

#if IS_ENABLED(CONFIG_IPV6)
void rds6_ib_get_mr_info(struct rds_ib_device *rds_ibdev,
                         struct rds6_info_rdma_connection *iinfo6)
{
        struct rds_ib_mr_pool *pool_1m = rds_ibdev->mr_1m_pool;

        iinfo6->rdma_mr_max = pool_1m->max_items;
        iinfo6->rdma_mr_size = pool_1m->fmr_attr.max_pages;
}
#endif

struct rds_ib_mr *rds_ib_reuse_mr(struct rds_ib_mr_pool *pool)
{
        struct rds_ib_mr *ibmr = NULL;
        struct llist_node *ret;
        unsigned long *flag;

        preempt_disable();
        flag = this_cpu_ptr(&clean_list_grace);
        set_bit(CLEAN_LIST_BUSY_BIT, flag);
        ret = llist_del_first(&pool->clean_list);
        if (ret) {
                ibmr = llist_entry(ret, struct rds_ib_mr, llnode);
                if (pool->pool_type == RDS_IB_MR_8K_POOL)
                        rds_ib_stats_inc(s_ib_rdma_mr_8k_reused);
                else
                        rds_ib_stats_inc(s_ib_rdma_mr_1m_reused);
        }

        clear_bit(CLEAN_LIST_BUSY_BIT, flag);
        preempt_enable();
        return ibmr;
}

static inline void wait_clean_list_grace(void)
{
        int cpu;
        unsigned long *flag;

        for_each_online_cpu(cpu) {
                flag = &per_cpu(clean_list_grace, cpu);
                while (test_bit(CLEAN_LIST_BUSY_BIT, flag))
                        cpu_relax();
        }
}

void rds_ib_sync_mr(void *trans_private, int direction)
{
        struct rds_ib_mr *ibmr = trans_private;
        struct rds_ib_device *rds_ibdev = ibmr->device;

        switch (direction) {
        case DMA_FROM_DEVICE:
                ib_dma_sync_sg_for_cpu(rds_ibdev->dev, ibmr->sg,
                        ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
                break;
        case DMA_TO_DEVICE:
                ib_dma_sync_sg_for_device(rds_ibdev->dev, ibmr->sg,
                        ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
                break;
        }
}

void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
        struct rds_ib_device *rds_ibdev = ibmr->device;

        if (ibmr->sg_dma_len) {
                ib_dma_unmap_sg(rds_ibdev->dev,
                                ibmr->sg, ibmr->sg_len,
                                DMA_BIDIRECTIONAL);
                ibmr->sg_dma_len = 0;
        }

        /* Release the s/g list */
        if (ibmr->sg_len) {
                unsigned int i;

                for (i = 0; i < ibmr->sg_len; ++i) {
                        struct page *page = sg_page(&ibmr->sg[i]);

                        /* FIXME we need a way to tell a r/w MR
                         * from a r/o MR */
                        WARN_ON(!page->mapping && irqs_disabled());
                        set_page_dirty(page);
                        put_page(page);
                }
                kfree(ibmr->sg);

                ibmr->sg = NULL;
                ibmr->sg_len = 0;
        }
}

void rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
        unsigned int pinned = ibmr->sg_len;

        __rds_ib_teardown_mr(ibmr);
        if (pinned) {
                struct rds_ib_mr_pool *pool = ibmr->pool;

                atomic_sub(pinned, &pool->free_pinned);
        }
}

static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all)
{
        unsigned int item_count;

        item_count = atomic_read(&pool->item_count);
        if (free_all)
                return item_count;

        return 0;
}

/*
 * given an llist of mrs, put them all into the list_head for more processing
 */
static unsigned int llist_append_to_list(struct llist_head *llist,
                                         struct list_head *list)
{
        struct rds_ib_mr *ibmr;
        struct llist_node *node;
        struct llist_node *next;
        unsigned int count = 0;

        node = llist_del_all(llist);
        while (node) {
                next = node->next;
                ibmr = llist_entry(node, struct rds_ib_mr, llnode);
                list_add_tail(&ibmr->unmap_list, list);
                node = next;
                count++;
        }
        return count;
}

/*
 * this takes a list head of mrs and turns it into linked llist nodes
 * of clusters.  Each cluster has linked llist nodes of
 * MR_CLUSTER_SIZE mrs that are ready for reuse.
 */
static void list_to_llist_nodes(struct rds_ib_mr_pool *pool,
                                struct list_head *list,
                                struct llist_node **nodes_head,
                                struct llist_node **nodes_tail)
{
        struct rds_ib_mr *ibmr;
        struct llist_node *cur = NULL;
        struct llist_node **next = nodes_head;

        list_for_each_entry(ibmr, list, unmap_list) {
                cur = &ibmr->llnode;
                *next = cur;
                next = &cur->next;
        }
        *next = NULL;
        *nodes_tail = cur;
}

/*
 * Flush our pool of MRs.
 * At a minimum, all currently unused MRs are unmapped.
 * If the number of MRs allocated exceeds the limit, we also try
 * to free as many MRs as needed to get back to this limit.
 */
int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
                         int free_all, struct rds_ib_mr **ibmr_ret)
{
        struct rds_ib_mr *ibmr;
        struct llist_node *clean_nodes;
        struct llist_node *clean_tail;
        LIST_HEAD(unmap_list);
        unsigned long unpinned = 0;
        unsigned int nfreed = 0, dirty_to_clean = 0, free_goal;

        if (pool->pool_type == RDS_IB_MR_8K_POOL)
                rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_flush);
        else
                rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_flush);

        if (ibmr_ret) {
                DEFINE_WAIT(wait);
                while (!mutex_trylock(&pool->flush_lock)) {
                        ibmr = rds_ib_reuse_mr(pool);
                        if (ibmr) {
                                *ibmr_ret = ibmr;
                                finish_wait(&pool->flush_wait, &wait);
                                goto out_nolock;
                        }

                        prepare_to_wait(&pool->flush_wait, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (llist_empty(&pool->clean_list))
                                schedule();

                        ibmr = rds_ib_reuse_mr(pool);
                        if (ibmr) {
                                *ibmr_ret = ibmr;
                                finish_wait(&pool->flush_wait, &wait);
                                goto out_nolock;
                        }
                }
                finish_wait(&pool->flush_wait, &wait);
        } else
                mutex_lock(&pool->flush_lock);

        if (ibmr_ret) {
                ibmr = rds_ib_reuse_mr(pool);
                if (ibmr) {
                        *ibmr_ret = ibmr;
                        goto out;
                }
        }

        /* Get the list of all MRs to be dropped. Ordering matters -
         * we want to put drop_list ahead of free_list.
         */
        dirty_to_clean = llist_append_to_list(&pool->drop_list, &unmap_list);
        dirty_to_clean += llist_append_to_list(&pool->free_list, &unmap_list);
        if (free_all)
                llist_append_to_list(&pool->clean_list, &unmap_list);

        free_goal = rds_ib_flush_goal(pool, free_all);

        if (list_empty(&unmap_list))
                goto out;

        if (pool->use_fastreg)
                rds_ib_unreg_frmr(&unmap_list, &nfreed, &unpinned, free_goal);
        else
                rds_ib_unreg_fmr(&unmap_list, &nfreed, &unpinned, free_goal);

        if (!list_empty(&unmap_list)) {
                /* we have to make sure that none of the things we're about
                 * to put on the clean list would race with other cpus trying
                 * to pull items off.  The llist would explode if we managed to
                 * remove something from the clean list and then add it back again
                 * while another CPU was spinning on that same item in llist_del_first.
                 *
                 * This is pretty unlikely, but just in case wait for an llist grace period
                 * here before adding anything back into the clean list.
                 */
                wait_clean_list_grace();

                list_to_llist_nodes(pool, &unmap_list, &clean_nodes, &clean_tail);
                if (ibmr_ret) {
                        *ibmr_ret = llist_entry(clean_nodes, struct rds_ib_mr, llnode);
                        clean_nodes = clean_nodes->next;
                }
                /* more than one entry in llist nodes */
                if (clean_nodes)
                        llist_add_batch(clean_nodes, clean_tail,
                                        &pool->clean_list);

        }

        atomic_sub(unpinned, &pool->free_pinned);
        atomic_sub(dirty_to_clean, &pool->dirty_count);
        atomic_sub(nfreed, &pool->item_count);

out:
        mutex_unlock(&pool->flush_lock);
        if (waitqueue_active(&pool->flush_wait))
                wake_up(&pool->flush_wait);
out_nolock:
        return 0;
}

struct rds_ib_mr *rds_ib_try_reuse_ibmr(struct rds_ib_mr_pool *pool)
{
        struct rds_ib_mr *ibmr = NULL;
        int iter = 0;

        while (1) {
                ibmr = rds_ib_reuse_mr(pool);
                if (ibmr)
                        return ibmr;

                if (atomic_inc_return(&pool->item_count) <= pool->max_items)
                        break;

                atomic_dec(&pool->item_count);

                if (++iter > 2) {
                        if (pool->pool_type == RDS_IB_MR_8K_POOL)
                                rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_depleted);
                        else
                                rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_depleted);
                        return ERR_PTR(-EAGAIN);
                }

                /* We do have some empty MRs. Flush them out. */
                if (pool->pool_type == RDS_IB_MR_8K_POOL)
                        rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_wait);
                else
                        rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_wait);

                rds_ib_flush_mr_pool(pool, 0, &ibmr);
                if (ibmr)
                        return ibmr;
        }

        return ibmr;
}

static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
{
        struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work);

        rds_ib_flush_mr_pool(pool, 0, NULL);
}

void rds_ib_free_mr(void *trans_private, int invalidate)
{
        struct rds_ib_mr *ibmr = trans_private;
        struct rds_ib_mr_pool *pool = ibmr->pool;
        struct rds_ib_device *rds_ibdev = ibmr->device;

        rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len);

        /* Return it to the pool's free list */
        if (rds_ibdev->use_fastreg)
                rds_ib_free_frmr_list(ibmr);
        else
                rds_ib_free_fmr_list(ibmr);

        atomic_add(ibmr->sg_len, &pool->free_pinned);
        atomic_inc(&pool->dirty_count);

        /* If we've pinned too many pages, request a flush */
        if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
            atomic_read(&pool->dirty_count) >= pool->max_items / 5)
                queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);

        if (invalidate) {
                if (likely(!in_interrupt())) {
                        rds_ib_flush_mr_pool(pool, 0, NULL);
                } else {
                        /* We get here if the user created a MR marked
                         * as use_once and invalidate at the same time.
                         */
                        queue_delayed_work(rds_ib_mr_wq,
                                           &pool->flush_worker, 10);
                }
        }

        rds_ib_dev_put(rds_ibdev);
}

void rds_ib_flush_mrs(void)
{
        struct rds_ib_device *rds_ibdev;

        down_read(&rds_ib_devices_lock);
        list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
                if (rds_ibdev->mr_8k_pool)
                        rds_ib_flush_mr_pool(rds_ibdev->mr_8k_pool, 0, NULL);

                if (rds_ibdev->mr_1m_pool)
                        rds_ib_flush_mr_pool(rds_ibdev->mr_1m_pool, 0, NULL);
        }
        up_read(&rds_ib_devices_lock);
}

void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
                    struct rds_sock *rs, u32 *key_ret,
                    struct rds_connection *conn)
{
        struct rds_ib_device *rds_ibdev;
        struct rds_ib_mr *ibmr = NULL;
        struct rds_ib_connection *ic = NULL;
        int ret;

        rds_ibdev = rds_ib_get_device(rs->rs_bound_addr.s6_addr32[3]);
        if (!rds_ibdev) {
                ret = -ENODEV;
                goto out;
        }

        if (conn)
                ic = conn->c_transport_data;

        if (!rds_ibdev->mr_8k_pool || !rds_ibdev->mr_1m_pool) {
                ret = -ENODEV;
                goto out;
        }

        if (rds_ibdev->use_fastreg)
                ibmr = rds_ib_reg_frmr(rds_ibdev, ic, sg, nents, key_ret);
        else
                ibmr = rds_ib_reg_fmr(rds_ibdev, sg, nents, key_ret);
        if (IS_ERR(ibmr)) {
                ret = PTR_ERR(ibmr);
                pr_warn("RDS/IB: rds_ib_get_mr failed (errno=%d)\n", ret);
        } else {
                return ibmr;
        }

out:
        if (rds_ibdev)
                rds_ib_dev_put(rds_ibdev);

        return ERR_PTR(ret);
}

void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
{
        cancel_delayed_work_sync(&pool->flush_worker);
        rds_ib_flush_mr_pool(pool, 1, NULL);
        WARN_ON(atomic_read(&pool->item_count));
        WARN_ON(atomic_read(&pool->free_pinned));
        kfree(pool);
}

struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev,
                                             int pool_type)
{
        struct rds_ib_mr_pool *pool;

        pool = kzalloc(sizeof(*pool), GFP_KERNEL);
        if (!pool)
                return ERR_PTR(-ENOMEM);

        pool->pool_type = pool_type;
        init_llist_head(&pool->free_list);
        init_llist_head(&pool->drop_list);
        init_llist_head(&pool->clean_list);
        mutex_init(&pool->flush_lock);
        init_waitqueue_head(&pool->flush_wait);
        INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);

        if (pool_type == RDS_IB_MR_1M_POOL) {
                /* +1 allows for unaligned MRs */
                pool->fmr_attr.max_pages = RDS_MR_1M_MSG_SIZE + 1;
                pool->max_items = rds_ibdev->max_1m_mrs;
        } else {
                /* pool_type == RDS_IB_MR_8K_POOL */
                pool->fmr_attr.max_pages = RDS_MR_8K_MSG_SIZE + 1;
                pool->max_items = rds_ibdev->max_8k_mrs;
        }

        pool->max_free_pinned = pool->max_items * pool->fmr_attr.max_pages / 4;
        pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps;
        pool->fmr_attr.page_shift = PAGE_SHIFT;
        pool->max_items_soft = rds_ibdev->max_mrs * 3 / 4;
        pool->use_fastreg = rds_ibdev->use_fastreg;

        return pool;
}

int rds_ib_mr_init(void)
{
        rds_ib_mr_wq = alloc_workqueue("rds_mr_flushd", WQ_MEM_RECLAIM, 0);
        if (!rds_ib_mr_wq)
                return -ENOMEM;
        return 0;
}

/* By the time this is called all the IB devices should have been torn down and
 * had their pools freed.  As each pool is freed its work struct is waited on,
 * so the pool flushing work queue should be idle by the time we get here.
 */
void rds_ib_mr_exit(void)
{
        destroy_workqueue(rds_ib_mr_wq);
}