Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git (synced 2024-10-29 23:53:32 +00:00)
Merge branch 'hwbm-locking-fixes'
Gregory CLEMENT says:

====================
Fix spinlock usage in HWBM

These two patches fix spinlock-related issues introduced in v4.6. They
were reported by Russell King and Jean-Jacques Hiblot.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 217f97e856
2 changed files with 4 additions and 0 deletions
drivers/net/ethernet/marvell/mvneta_bm.c
@@ -189,6 +189,7 @@ struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id,
 			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 		hwbm_pool->construct = mvneta_bm_construct;
 		hwbm_pool->priv = new_pool;
+		spin_lock_init(&hwbm_pool->lock);
 
 		/* Create new pool */
 		err = mvneta_bm_pool_create(priv, new_pool);
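The added line matters because the mvneta driver hands this pool to the generic HWBM helpers, which take hwbm_pool->lock (see the hwbm_pool_add() hunk below), and a spinlock must be initialized before it is first taken. A minimal sketch of that init-before-use pattern, using hypothetical my_pool_setup()/my_pool_fill() helpers rather than the driver's actual code:

#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_pool {
	spinlock_t lock;
	unsigned int buf_num;
	unsigned int size;
};

/* Set-up path: initialize the lock exactly once, before any
 * other context can possibly take it. */
static struct my_pool *my_pool_setup(unsigned int size, gfp_t gfp)
{
	struct my_pool *p = kzalloc(sizeof(*p), gfp);

	if (!p)
		return NULL;
	p->size = size;
	spin_lock_init(&p->lock);	/* counterpart of the line added above */
	return p;
}

/* Fill path: takes the lock that the set-up path initialized. */
static int my_pool_fill(struct my_pool *p, unsigned int buf_num)
{
	unsigned long flags;
	int total;

	spin_lock_irqsave(&p->lock, flags);
	p->buf_num += buf_num;
	total = p->buf_num;
	spin_unlock_irqrestore(&p->lock, flags);
	return total;
}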
net/core/hwbm.c
@@ -55,18 +55,21 @@ int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num, gfp_t gfp)
 	spin_lock_irqsave(&bm_pool->lock, flags);
 	if (bm_pool->buf_num == bm_pool->size) {
 		pr_warn("pool already filled\n");
+		spin_unlock_irqrestore(&bm_pool->lock, flags);
 		return bm_pool->buf_num;
 	}
 
 	if (buf_num + bm_pool->buf_num > bm_pool->size) {
 		pr_warn("cannot allocate %d buffers for pool\n",
 			buf_num);
+		spin_unlock_irqrestore(&bm_pool->lock, flags);
 		return 0;
 	}
 
 	if ((buf_num + bm_pool->buf_num) < bm_pool->buf_num) {
 		pr_warn("Adding %d buffers to the %d current buffers will overflow\n",
 			buf_num, bm_pool->buf_num);
+		spin_unlock_irqrestore(&bm_pool->lock, flags);
 		return 0;
 	}
 
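The three added spin_unlock_irqrestore() calls balance the spin_lock_irqsave() taken at the top of hwbm_pool_add(); before the fix, each early error return left the lock held with interrupts disabled on that CPU. An alternative way to keep such pairing balanced is a single unlock behind a goto label. The sketch below shows that idiom with a hypothetical my_pool_add(), not the actual hwbm code:

#include <linux/printk.h>
#include <linux/spinlock.h>

struct my_pool {
	spinlock_t lock;
	unsigned int buf_num;
	unsigned int size;
};

/* Every exit path funnels through the same unlock, so the
 * lock/unlock pairing stays balanced as error checks grow. */
static int my_pool_add(struct my_pool *p, unsigned int buf_num)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&p->lock, flags);

	if (p->buf_num == p->size) {
		pr_warn("pool already filled\n");
		ret = p->buf_num;
		goto out_unlock;
	}

	if (buf_num + p->buf_num > p->size) {
		pr_warn("cannot allocate %d buffers for pool\n", buf_num);
		goto out_unlock;
	}

	p->buf_num += buf_num;
	ret = p->buf_num;

out_unlock:
	spin_unlock_irqrestore(&p->lock, flags);
	return ret;
}

Funnelling every exit through one label keeps the pairing correct as new checks are added, at the cost of a little indirection; the upstream fix shown above keeps the per-return unlocks instead.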