net: mac802154: Introduce a tx queue flushing mechanism

Right now we are able to stop a queue but we have no indication if a
transmission is ongoing or not.

Thanks to recent additions, we can track the number of ongoing
transmissions so we know if the last transmission is over. Adding on top
of it an internal wait queue also allows us to be woken up asynchronously
when this happens. If, beforehand, we marked the queue to be held and
stopped it, we end up flushing and stopping the tx queue.

Thanks to this feature, we will soon be able to introduce a synchronous
transmit API.

Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
Acked-by: Alexander Aring <aahringo@redhat.com>
Link: https://lore.kernel.org/r/20220519150516.443078-9-miquel.raynal@bootlin.com
Signed-off-by: Stefan Schmidt <stefan@datenfreihafen.org>
This commit is contained in:
Miquel Raynal 2022-05-19 17:05:13 +02:00 committed by Stefan Schmidt
parent a40612f399
commit f0feb34904
6 changed files with 32 additions and 5 deletions

View File

@ -218,6 +218,7 @@ struct wpan_phy {
spinlock_t queue_lock;
atomic_t ongoing_txs;
atomic_t hold_txs;
wait_queue_head_t sync_txq;
char priv[] __aligned(NETDEV_ALIGN);
};

View File

@ -129,6 +129,7 @@ wpan_phy_new(const struct cfg802154_ops *ops, size_t priv_size)
wpan_phy_net_set(&rdev->wpan_phy, &init_net);
init_waitqueue_head(&rdev->dev_wait);
init_waitqueue_head(&rdev->wpan_phy.sync_txq);
spin_lock_init(&rdev->wpan_phy.queue_lock);

View File

@ -46,7 +46,7 @@ static int ieee802154_suspend(struct wpan_phy *wpan_phy)
if (!local->open_count)
goto suspend;
ieee802154_hold_queue(local);
ieee802154_sync_and_hold_queue(local);
synchronize_net();
/* stop hardware - this must stop RX */

View File

@ -124,6 +124,7 @@ extern struct ieee802154_mlme_ops mac802154_mlme_wpan;
void ieee802154_rx(struct ieee802154_local *local, struct sk_buff *skb);
void ieee802154_xmit_sync_worker(struct work_struct *work);
int ieee802154_sync_and_hold_queue(struct ieee802154_local *local);
netdev_tx_t
ieee802154_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev);
netdev_tx_t

View File

@ -44,7 +44,8 @@ void ieee802154_xmit_sync_worker(struct work_struct *work)
err_tx:
/* Restart the netif queue on each sub_if_data object. */
ieee802154_release_queue(local);
atomic_dec(&local->phy->ongoing_txs);
if (!atomic_dec_and_test(&local->phy->ongoing_txs))
wake_up(&local->phy->sync_txq);
kfree_skb(skb);
netdev_dbg(dev, "transmission failed\n");
}
@ -100,12 +101,33 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
err_wake_netif_queue:
ieee802154_release_queue(local);
atomic_dec(&local->phy->ongoing_txs);
if (!atomic_dec_and_test(&local->phy->ongoing_txs))
wake_up(&local->phy->sync_txq);
err_free_skb:
kfree_skb(skb);
return NETDEV_TX_OK;
}
static int ieee802154_sync_queue(struct ieee802154_local *local)
{
int ret;
ieee802154_hold_queue(local);
ieee802154_disable_queue(local);
wait_event(local->phy->sync_txq, !atomic_read(&local->phy->ongoing_txs));
ret = local->tx_result;
ieee802154_release_queue(local);
return ret;
}
int ieee802154_sync_and_hold_queue(struct ieee802154_local *local)
{
ieee802154_hold_queue(local);
return ieee802154_sync_queue(local);
}
static netdev_tx_t
ieee802154_hot_tx(struct ieee802154_local *local, struct sk_buff *skb)
{

View File

@ -140,7 +140,8 @@ void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb,
}
dev_consume_skb_any(skb);
atomic_dec(&hw->phy->ongoing_txs);
if (!atomic_dec_and_test(&hw->phy->ongoing_txs))
wake_up(&hw->phy->sync_txq);
}
EXPORT_SYMBOL(ieee802154_xmit_complete);
@ -152,7 +153,8 @@ void ieee802154_xmit_error(struct ieee802154_hw *hw, struct sk_buff *skb,
local->tx_result = reason;
ieee802154_release_queue(local);
dev_kfree_skb_any(skb);
atomic_dec(&hw->phy->ongoing_txs);
if (!atomic_dec_and_test(&hw->phy->ongoing_txs))
wake_up(&hw->phy->sync_txq);
}
EXPORT_SYMBOL(ieee802154_xmit_error);