staging: rtl8723bs: unwrap initialization of queues

Unwrap the initialization of the queues to avoid a false positive
lockdep warning:

[   27.350258] ============================================
[   27.350267] WARNING: possible recursive locking detected
[   27.350276] 5.14.0-rc6+ #16 Tainted: G         C OE
[   27.350288] --------------------------------------------
[   27.350295] RTW_CMD_THREAD/679 is trying to acquire lock:
[   27.350306] ffffa846c03290c8 (&(pqueue->lock)){+.-.}-{2:2},
		at: rtw_alloc_network+0x1b/0xa0 [r8723bs]
[   27.350441]
               but task is already holding lock:
[   27.350448] ffffa846c0329118 (&(pqueue->lock)){+.-.}-{2:2},
		at: rtw_update_scanned_network+0x33/0x1d0 [r8723bs]
[   27.350573]
               other info that might help us debug this:
[   27.350581]  Possible unsafe locking scenario:

[   27.350588]        CPU0
[   27.350594]        ----
[   27.350600]   lock(&(pqueue->lock));
[   27.350614]   lock(&(pqueue->lock));
[   27.350627]
                *** DEADLOCK ***

[   27.350634]  May be due to missing lock nesting notation

[   27.350641] 2 locks held by RTW_CMD_THREAD/679:
[   27.350652]  #0: ffffa846c0329038 (&pmlmepriv->lock){+...}-{2:2},
	at: rtw_survey_event_callback+0x2d/0xe0 [r8723bs]
[   27.350780]  #1: ffffa846c0329118 (&(pqueue->lock)){+.-.}-{2:2},
	at: rtw_update_scanned_network+0x33/0x1d0 [r8723bs]
[   27.350907]
               stack backtrace:
[   27.350916] CPU: 3 PID: 679 Comm: RTW_CMD_THREAD Tainted: G
		C OE     5.14.0-rc6+ #16
[   27.350933] Hardware name: LENOVO 80NR/Madrid, BIOS DACN25WW
		08/20/2015
[   27.350943] Call Trace:
[   27.350959]  dump_stack_lvl+0x56/0x6f
[   27.350982]  __lock_acquire.cold.79+0x137/0x298
[   27.351012]  lock_acquire+0xb4/0x2c0
[   27.351031]  ? rtw_alloc_network+0x1b/0xa0 [r8723bs]
[   27.351140]  ? rtw_update_scanned_network+0x33/0x1d0 [r8723bs]
[   27.351254]  _raw_spin_lock_bh+0x34/0x40
[   27.351271]  ? rtw_alloc_network+0x1b/0xa0 [r8723bs]
[   27.351378]  rtw_alloc_network+0x1b/0xa0 [r8723bs]
[   27.351488]  rtw_update_scanned_network+0xa5/0x1d0 [r8723bs]
[   27.351605]  rtw_survey_event_callback+0x54/0xe0 [r8723bs]
[   27.351719]  mlme_evt_hdl+0x4e/0x70 [r8723bs]
[   27.351839]  rtw_cmd_thread+0x16c/0x3d0 [r8723bs]
[   27.351945]  ? rtw_stop_cmd_thread+0x50/0x50 [r8723bs]
[   27.352045]  kthread+0x136/0x160
[   27.352064]  ? set_kthread_struct+0x40/0x40
[   27.352083]  ret_from_fork+0x22/0x30

This happens because the wrapping function _rtw_init_queue() makes
lockdep treat all the queues as a single lock: spin_lock_init()
assigns a lock class per call site, and with the helper there is only
one such site. The queues, however, are distinct objects, each
protected by its own lock.
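
For reference, this is roughly what the wrapped form looks like (a
minimal sketch, assuming the trivial helper implied by the semantic
patch below; the struct layout is an assumption, not copied from the
driver):

#include <linux/list.h>
#include <linux/spinlock.h>

struct __queue {
	struct list_head queue;
	spinlock_t lock;
};

void _rtw_init_queue(struct __queue *pqueue)
{
	INIT_LIST_HEAD(&pqueue->queue);
	/*
	 * spin_lock_init() declares one static lock_class_key at this
	 * single expansion site, so every queue initialized through
	 * the helper ends up in the same lockdep class.
	 */
	spin_lock_init(&pqueue->lock);
}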

Applied the following semantic patch:

@@
expression a;
@@

-       _rtw_init_queue(&a);
+       INIT_LIST_HEAD(&a.queue);
+       spin_lock_init(&a.lock);
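
With the initialization unwrapped, each spin_lock_init() call site
gets its own lock class, so the nesting seen in the trace above
(rtw_update_scanned_network() holding the scanned_queue lock while
rtw_alloc_network() takes another queue's lock) is no longer mistaken
for recursion on a single lock. A hypothetical illustration of that
pattern (the exact locking in those functions is assumed, not copied
from the driver):

static void example_nested_queue_locking(struct mlme_priv *pmlmepriv)
{
	/* as in rtw_update_scanned_network() */
	spin_lock_bh(&pmlmepriv->scanned_queue.lock);
	/* as in rtw_alloc_network(): a different queue, now a different class */
	spin_lock_bh(&pmlmepriv->free_bss_pool.lock);
	/* ... move an entry from free_bss_pool onto scanned_queue ... */
	spin_unlock_bh(&pmlmepriv->free_bss_pool.lock);
	spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
}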

Reported-by: Hans De Goede <hdegoede@redhat.com>
Reviewed-by: Hans de Goede <hdegoede@redhat.com>
Signed-off-by: Fabio Aiuto <fabioaiuto83@gmail.com>
Link: https://lore.kernel.org/r/c2c3a18cc2b883feab74f150ccbaa4f2cc11995c.1630307025.git.fabioaiuto83@gmail.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

@@ -18,7 +18,8 @@ void init_mlme_ap_info(struct adapter *padapter)
 	spin_lock_init(&pmlmepriv->bcn_update_lock);
 	/* for ACL */
-	_rtw_init_queue(&pacl_list->acl_node_q);
+	INIT_LIST_HEAD(&pacl_list->acl_node_q.queue);
+	spin_lock_init(&pacl_list->acl_node_q.lock);
 	/* pmlmeext->bstart_bss = false; */

@@ -166,7 +166,8 @@ int rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
 	init_completion(&pcmdpriv->cmd_queue_comp);
 	init_completion(&pcmdpriv->terminate_cmdthread_comp);
-	_rtw_init_queue(&(pcmdpriv->cmd_queue));
+	INIT_LIST_HEAD(&(pcmdpriv->cmd_queue).queue);
+	spin_lock_init(&(pcmdpriv->cmd_queue).lock);
 	/* allocate DMA-able/Non-Page memory for cmd_buf and rsp_buf */

@@ -27,8 +27,10 @@ int rtw_init_mlme_priv(struct adapter *padapter)
 	pmlmepriv->scan_mode = SCAN_ACTIVE;/* 1: active, 0: pasive. Maybe someday we should rename this varable to "active_mode" (Jeff) */
 	spin_lock_init(&pmlmepriv->lock);
-	_rtw_init_queue(&pmlmepriv->free_bss_pool);
-	_rtw_init_queue(&pmlmepriv->scanned_queue);
+	INIT_LIST_HEAD(&pmlmepriv->free_bss_pool.queue);
+	spin_lock_init(&pmlmepriv->free_bss_pool.lock);
+	INIT_LIST_HEAD(&pmlmepriv->scanned_queue.queue);
+	spin_lock_init(&pmlmepriv->scanned_queue.lock);
 	set_scanned_network_val(pmlmepriv, 0);

@@ -25,7 +25,8 @@ void _rtw_init_sta_recv_priv(struct sta_recv_priv *psta_recvpriv)
 	/* for (i = 0; i<MAX_RX_NUMBLKS; i++) */
 	/* _rtw_init_queue(&psta_recvpriv->blk_strms[i]); */
-	_rtw_init_queue(&psta_recvpriv->defrag_q);
+	INIT_LIST_HEAD(&psta_recvpriv->defrag_q.queue);
+	spin_lock_init(&psta_recvpriv->defrag_q.lock);
 }

@@ -36,9 +37,12 @@ signed int _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter)
 	spin_lock_init(&precvpriv->lock);
-	_rtw_init_queue(&precvpriv->free_recv_queue);
-	_rtw_init_queue(&precvpriv->recv_pending_queue);
-	_rtw_init_queue(&precvpriv->uc_swdec_pending_queue);
+	INIT_LIST_HEAD(&precvpriv->free_recv_queue.queue);
+	spin_lock_init(&precvpriv->free_recv_queue.lock);
+	INIT_LIST_HEAD(&precvpriv->recv_pending_queue.queue);
+	spin_lock_init(&precvpriv->recv_pending_queue.lock);
+	INIT_LIST_HEAD(&precvpriv->uc_swdec_pending_queue.queue);
+	spin_lock_init(&precvpriv->uc_swdec_pending_queue.lock);
 	precvpriv->adapter = padapter;

@@ -19,7 +19,8 @@ void _rtw_init_stainfo(struct sta_info *psta)
 	/* INIT_LIST_HEAD(&psta->sleep_list); */
 	/* INIT_LIST_HEAD(&psta->wakeup_list); */
-	_rtw_init_queue(&psta->sleep_q);
+	INIT_LIST_HEAD(&psta->sleep_q.queue);
+	spin_lock_init(&psta->sleep_q.lock);
 	psta->sleepq_len = 0;
 	_rtw_init_sta_xmit_priv(&psta->sta_xmitpriv);

@@ -62,14 +63,17 @@ u32 _rtw_init_sta_priv(struct sta_priv *pstapriv)
 	pstapriv->pstainfo_buf = pstapriv->pallocated_stainfo_buf + 4 -
 		((SIZE_PTR)(pstapriv->pallocated_stainfo_buf) & 3);
-	_rtw_init_queue(&pstapriv->free_sta_queue);
+	INIT_LIST_HEAD(&pstapriv->free_sta_queue.queue);
+	spin_lock_init(&pstapriv->free_sta_queue.lock);
 	spin_lock_init(&pstapriv->sta_hash_lock);
 	/* _rtw_init_queue(&pstapriv->asoc_q); */
 	pstapriv->asoc_sta_count = 0;
-	_rtw_init_queue(&pstapriv->sleep_q);
-	_rtw_init_queue(&pstapriv->wakeup_q);
+	INIT_LIST_HEAD(&pstapriv->sleep_q.queue);
+	spin_lock_init(&pstapriv->sleep_q.lock);
+	INIT_LIST_HEAD(&pstapriv->wakeup_q.queue);
+	spin_lock_init(&pstapriv->wakeup_q.lock);
 	psta = (struct sta_info *)(pstapriv->pstainfo_buf);

@@ -242,7 +246,8 @@ struct sta_info *rtw_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
 	/* preorder_ctrl->wsize_b = (NR_RECVBUFF-2); */
 	preorder_ctrl->wsize_b = 64;/* 64; */
-	_rtw_init_queue(&preorder_ctrl->pending_recvframe_queue);
+	INIT_LIST_HEAD(&preorder_ctrl->pending_recvframe_queue.queue);
+	spin_lock_init(&preorder_ctrl->pending_recvframe_queue.lock);
 	rtw_init_recv_timer(preorder_ctrl);
 }

@@ -13,7 +13,8 @@ static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
 static void _init_txservq(struct tx_servq *ptxservq)
 {
 	INIT_LIST_HEAD(&ptxservq->tx_pending);
-	_rtw_init_queue(&ptxservq->sta_pending);
+	INIT_LIST_HEAD(&ptxservq->sta_pending.queue);
+	spin_lock_init(&ptxservq->sta_pending.lock);
 	ptxservq->qcnt = 0;
 }

@@ -49,13 +50,19 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
 	pxmitpriv->adapter = padapter;
-	_rtw_init_queue(&pxmitpriv->be_pending);
-	_rtw_init_queue(&pxmitpriv->bk_pending);
-	_rtw_init_queue(&pxmitpriv->vi_pending);
-	_rtw_init_queue(&pxmitpriv->vo_pending);
-	_rtw_init_queue(&pxmitpriv->bm_pending);
+	INIT_LIST_HEAD(&pxmitpriv->be_pending.queue);
+	spin_lock_init(&pxmitpriv->be_pending.lock);
+	INIT_LIST_HEAD(&pxmitpriv->bk_pending.queue);
+	spin_lock_init(&pxmitpriv->bk_pending.lock);
+	INIT_LIST_HEAD(&pxmitpriv->vi_pending.queue);
+	spin_lock_init(&pxmitpriv->vi_pending.lock);
+	INIT_LIST_HEAD(&pxmitpriv->vo_pending.queue);
+	spin_lock_init(&pxmitpriv->vo_pending.lock);
+	INIT_LIST_HEAD(&pxmitpriv->bm_pending.queue);
+	spin_lock_init(&pxmitpriv->bm_pending.lock);
-	_rtw_init_queue(&pxmitpriv->free_xmit_queue);
+	INIT_LIST_HEAD(&pxmitpriv->free_xmit_queue.queue);
+	spin_lock_init(&pxmitpriv->free_xmit_queue.lock);
 	/*
 	 * Please allocate memory with the sz = (struct xmit_frame) * NR_XMITFRAME,

@@ -96,8 +103,10 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
 	pxmitpriv->frag_len = MAX_FRAG_THRESHOLD;
 	/* init xmit_buf */
-	_rtw_init_queue(&pxmitpriv->free_xmitbuf_queue);
-	_rtw_init_queue(&pxmitpriv->pending_xmitbuf_queue);
+	INIT_LIST_HEAD(&pxmitpriv->free_xmitbuf_queue.queue);
+	spin_lock_init(&pxmitpriv->free_xmitbuf_queue.lock);
+	INIT_LIST_HEAD(&pxmitpriv->pending_xmitbuf_queue.queue);
+	spin_lock_init(&pxmitpriv->pending_xmitbuf_queue.lock);
 	pxmitpriv->pallocated_xmitbuf = vzalloc(NR_XMITBUFF * sizeof(struct xmit_buf) + 4);

@@ -145,7 +154,8 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
 	pxmitpriv->free_xmitbuf_cnt = NR_XMITBUFF;
 	/* init xframe_ext queue, the same count as extbuf */
-	_rtw_init_queue(&pxmitpriv->free_xframe_ext_queue);
+	INIT_LIST_HEAD(&pxmitpriv->free_xframe_ext_queue.queue);
+	spin_lock_init(&pxmitpriv->free_xframe_ext_queue.lock);
 	pxmitpriv->xframe_ext_alloc_addr = vzalloc(NR_XMIT_EXTBUFF * sizeof(struct xmit_frame) + 4);

@@ -178,7 +188,8 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
 	pxmitpriv->free_xframe_ext_cnt = NR_XMIT_EXTBUFF;
 	/* Init xmit extension buff */
-	_rtw_init_queue(&pxmitpriv->free_xmit_extbuf_queue);
+	INIT_LIST_HEAD(&pxmitpriv->free_xmit_extbuf_queue.queue);
+	spin_lock_init(&pxmitpriv->free_xmit_extbuf_queue.lock);
 	pxmitpriv->pallocated_xmit_extbuf = vzalloc(NR_XMIT_EXTBUFF * sizeof(struct xmit_buf) + 4);

@@ -378,8 +378,10 @@ s32 rtl8723bs_init_recv_priv(struct adapter *padapter)
 	precvpriv = &padapter->recvpriv;
 	/* 3 1. init recv buffer */
-	_rtw_init_queue(&precvpriv->free_recv_buf_queue);
-	_rtw_init_queue(&precvpriv->recv_buf_pending_queue);
+	INIT_LIST_HEAD(&precvpriv->free_recv_buf_queue.queue);
+	spin_lock_init(&precvpriv->free_recv_buf_queue.lock);
+	INIT_LIST_HEAD(&precvpriv->recv_buf_pending_queue.queue);
+	spin_lock_init(&precvpriv->recv_buf_pending_queue.lock);
 	n = NR_RECVBUFF * sizeof(struct recv_buf) + 4;
 	precvpriv->pallocated_recv_buf = rtw_zmalloc(n);