Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git, synced 2024-09-27 12:57:53 +00:00.
wireless: brcmfmac: Use netif_rx().
Since commit
baebdf48c3
("net: dev: Makes sure netif_rx() can be invoked in any context.")
the function netif_rx() can be used in preemptible/thread context as
well as in interrupt context.
Use netif_rx().
Cc: Arend van Spriel <aspriel@gmail.com>
Cc: Chi-hsien Lin <chi-hsien.lin@infineon.com>
Cc: Chung-hsien Hsu <chung-hsien.hsu@infineon.com>
Cc: Franky Lin <franky.lin@broadcom.com>
Cc: Hante Meuleman <hante.meuleman@broadcom.com>
Cc: Kalle Valo <kvalo@kernel.org>
Cc: SHA-cyfmac-dev-list@infineon.com
Cc: Wright Feng <wright.feng@infineon.com>
Cc: brcm80211-dev-list.pdl@broadcom.com
Cc: linux-wireless@vger.kernel.org
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in: parent 1cd2ef9fcb, commit b381728e7e.
7 changed files with 19 additions and 28 deletions.
|
@ -397,9 +397,9 @@ brcmf_proto_bcdc_add_tdls_peer(struct brcmf_pub *drvr, int ifidx,
|
|||
}
|
||||
|
||||
static void brcmf_proto_bcdc_rxreorder(struct brcmf_if *ifp,
|
||||
struct sk_buff *skb, bool inirq)
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
brcmf_fws_rxreorder(ifp, skb, inirq);
|
||||
brcmf_fws_rxreorder(ifp, skb);
|
||||
}
|
||||
|
||||
static void
|
||||
|
|
|
@ -400,7 +400,7 @@ void brcmf_txflowblock_if(struct brcmf_if *ifp,
|
|||
spin_unlock_irqrestore(&ifp->netif_stop_lock, flags);
|
||||
}
|
||||
|
||||
void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq)
|
||||
void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb)
|
||||
{
|
||||
/* Most of Broadcom's firmwares send 802.11f ADD frame every time a new
|
||||
* STA connects to the AP interface. This is an obsoleted standard most
|
||||
|
@ -423,15 +423,7 @@ void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq)
|
|||
ifp->ndev->stats.rx_packets++;
|
||||
|
||||
brcmf_dbg(DATA, "rx proto=0x%X\n", ntohs(skb->protocol));
|
||||
if (inirq) {
|
||||
netif_rx(skb);
|
||||
} else {
|
||||
/* If the receive is not processed inside an ISR,
|
||||
* the softirqd must be woken explicitly to service
|
||||
* the NET_RX_SOFTIRQ. This is handled by netif_rx_ni().
|
||||
*/
|
||||
netif_rx_ni(skb);
|
||||
}
|
||||
netif_rx(skb);
|
||||
}
|
||||
|
||||
void brcmf_netif_mon_rx(struct brcmf_if *ifp, struct sk_buff *skb)
|
||||
|
@ -480,7 +472,7 @@ void brcmf_netif_mon_rx(struct brcmf_if *ifp, struct sk_buff *skb)
|
|||
skb->pkt_type = PACKET_OTHERHOST;
|
||||
skb->protocol = htons(ETH_P_802_2);
|
||||
|
||||
brcmf_netif_rx(ifp, skb, false);
|
||||
brcmf_netif_rx(ifp, skb);
|
||||
}
|
||||
|
||||
static int brcmf_rx_hdrpull(struct brcmf_pub *drvr, struct sk_buff *skb,
|
||||
|
@ -515,7 +507,7 @@ void brcmf_rx_frame(struct device *dev, struct sk_buff *skb, bool handle_event,
|
|||
return;
|
||||
|
||||
if (brcmf_proto_is_reorder_skb(skb)) {
|
||||
brcmf_proto_rxreorder(ifp, skb, inirq);
|
||||
brcmf_proto_rxreorder(ifp, skb);
|
||||
} else {
|
||||
/* Process special event packets */
|
||||
if (handle_event) {
|
||||
|
@ -524,7 +516,7 @@ void brcmf_rx_frame(struct device *dev, struct sk_buff *skb, bool handle_event,
|
|||
brcmf_fweh_process_skb(ifp->drvr, skb,
|
||||
BCMILCP_SUBTYPE_VENDOR_LONG, gfp);
|
||||
}
|
||||
brcmf_netif_rx(ifp, skb, inirq);
|
||||
brcmf_netif_rx(ifp, skb);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -208,7 +208,7 @@ void brcmf_remove_interface(struct brcmf_if *ifp, bool locked);
|
|||
void brcmf_txflowblock_if(struct brcmf_if *ifp,
|
||||
enum brcmf_netif_stop_reason reason, bool state);
|
||||
void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success);
|
||||
void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq);
|
||||
void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb);
|
||||
void brcmf_netif_mon_rx(struct brcmf_if *ifp, struct sk_buff *skb);
|
||||
void brcmf_net_detach(struct net_device *ndev, bool locked);
|
||||
int brcmf_net_mon_attach(struct brcmf_if *ifp);
|
||||
|
|
|
@ -1664,7 +1664,7 @@ static void brcmf_rxreorder_get_skb_list(struct brcmf_ampdu_rx_reorder *rfi,
|
|||
rfi->pend_pkts -= skb_queue_len(skb_list);
|
||||
}
|
||||
|
||||
void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt, bool inirq)
|
||||
void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt)
|
||||
{
|
||||
struct brcmf_pub *drvr = ifp->drvr;
|
||||
u8 *reorder_data;
|
||||
|
@ -1682,7 +1682,7 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt, bool inirq)
|
|||
/* validate flags and flow id */
|
||||
if (flags == 0xFF) {
|
||||
bphy_err(drvr, "invalid flags...so ignore this packet\n");
|
||||
brcmf_netif_rx(ifp, pkt, inirq);
|
||||
brcmf_netif_rx(ifp, pkt);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -1694,7 +1694,7 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt, bool inirq)
|
|||
if (rfi == NULL) {
|
||||
brcmf_dbg(INFO, "received flags to cleanup, but no flow (%d) yet\n",
|
||||
flow_id);
|
||||
brcmf_netif_rx(ifp, pkt, inirq);
|
||||
brcmf_netif_rx(ifp, pkt);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -1719,7 +1719,7 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt, bool inirq)
|
|||
rfi = kzalloc(buf_size, GFP_ATOMIC);
|
||||
if (rfi == NULL) {
|
||||
bphy_err(drvr, "failed to alloc buffer\n");
|
||||
brcmf_netif_rx(ifp, pkt, inirq);
|
||||
brcmf_netif_rx(ifp, pkt);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -1833,7 +1833,7 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt, bool inirq)
|
|||
netif_rx:
|
||||
skb_queue_walk_safe(&reorder_list, pkt, pnext) {
|
||||
__skb_unlink(pkt, &reorder_list);
|
||||
brcmf_netif_rx(ifp, pkt, inirq);
|
||||
brcmf_netif_rx(ifp, pkt);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -42,6 +42,6 @@ void brcmf_fws_add_interface(struct brcmf_if *ifp);
|
|||
void brcmf_fws_del_interface(struct brcmf_if *ifp);
|
||||
void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb);
|
||||
void brcmf_fws_bus_blocked(struct brcmf_pub *drvr, bool flow_blocked);
|
||||
void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq);
|
||||
void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb);
|
||||
|
||||
#endif /* FWSIGNAL_H_ */
|
||||
|
|
|
@ -536,8 +536,7 @@ static int brcmf_msgbuf_hdrpull(struct brcmf_pub *drvr, bool do_fws,
|
|||
return -ENODEV;
|
||||
}
|
||||
|
||||
static void brcmf_msgbuf_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb,
|
||||
bool inirq)
|
||||
static void brcmf_msgbuf_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb)
|
||||
{
|
||||
}
|
||||
|
||||
|
@ -1191,7 +1190,7 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
|
|||
}
|
||||
|
||||
skb->protocol = eth_type_trans(skb, ifp->ndev);
|
||||
brcmf_netif_rx(ifp, skb, false);
|
||||
brcmf_netif_rx(ifp, skb);
|
||||
}
|
||||
|
||||
static void brcmf_msgbuf_process_gen_status(struct brcmf_msgbuf *msgbuf,
|
||||
|
|
|
@ -32,7 +32,7 @@ struct brcmf_proto {
|
|||
u8 peer[ETH_ALEN]);
|
||||
void (*add_tdls_peer)(struct brcmf_pub *drvr, int ifidx,
|
||||
u8 peer[ETH_ALEN]);
|
||||
void (*rxreorder)(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq);
|
||||
void (*rxreorder)(struct brcmf_if *ifp, struct sk_buff *skb);
|
||||
void (*add_if)(struct brcmf_if *ifp);
|
||||
void (*del_if)(struct brcmf_if *ifp);
|
||||
void (*reset_if)(struct brcmf_if *ifp);
|
||||
|
@ -109,9 +109,9 @@ static inline bool brcmf_proto_is_reorder_skb(struct sk_buff *skb)
|
|||
}
|
||||
|
||||
static inline void
|
||||
brcmf_proto_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq)
|
||||
brcmf_proto_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb)
|
||||
{
|
||||
ifp->drvr->proto->rxreorder(ifp, skb, inirq);
|
||||
ifp->drvr->proto->rxreorder(ifp, skb);
|
||||
}
|
||||
|
||||
static inline void
|
||||
|
|
Loading…
Reference in a new issue