virtio_net: move tx vq operation under tx queue lock

It's unsafe to operate a vq from multiple threads.
Unfortunately, this is exactly what we do when invoking
the tx cleaning poll from rx napi.
The same happens with napi-tx even without the
opportunistic cleaning from the receive interrupt: the tx
napi poll races with processing the vq in start_xmit.

As a fix, move everything that deals with the vq under the tx queue lock.

Fixes: b92f1e6751 ("virtio-net: transmit napi")
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Michael S. Tsirkin 2021-04-13 01:35:26 -04:00
parent 6f5312f801
commit 5a2f966d0f
1 changed file with 21 additions and 1 deletion
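
Before the diff, here is the idea in miniature. The patch replaces virtqueue_napi_complete(), which touched the vq after the tx lock had been dropped, with an open-coded sequence that keeps every vq operation under the lock and only re-checks for late completions once the lock is released. The userspace sketch below models that completion pattern; the toy_* names, the mutex standing in for the tx queue lock, and the sequence counter standing in for the opaque virtqueue cookie are all illustrative inventions, not kernel APIs.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for a send queue: the mutex plays the role of the
 * netdev tx queue lock, and seq plays the role of the opaque cookie
 * returned by virtqueue_enable_cb_prepare(). */
struct toy_queue {
	pthread_mutex_t lock;
	bool cb_enabled;	/* "callbacks armed" */
	atomic_uint seq;	/* bumped whenever new work arrives */
	unsigned int pending;	/* completed buffers not yet reaped */
};

/* Another thread adding work, cf. start_xmit() on a different CPU.
 * It takes the same lock, so it cannot race with toy_poll_tx(). */
static void toy_add_work(struct toy_queue *q)
{
	pthread_mutex_lock(&q->lock);
	q->pending++;
	atomic_fetch_add(&q->seq, 1);
	pthread_mutex_unlock(&q->lock);
}

/* cf. virtqueue_poll(): did anything arrive since the cookie was
 * taken?  Like the real call, this runs without the lock held. */
static bool toy_poll(struct toy_queue *q, unsigned int cookie)
{
	return atomic_load(&q->seq) != cookie;
}

/* The shape of virtnet_poll_tx() after the patch: disable callbacks,
 * reap, re-arm and take a cookie, all under the lock; drop the lock;
 * then re-check the cookie and reschedule if work slipped in while
 * callbacks were being re-armed. */
static void toy_poll_tx(struct toy_queue *q)
{
	unsigned int cookie;

	pthread_mutex_lock(&q->lock);
	q->cb_enabled = false;		/* virtqueue_disable_cb() */
	q->pending = 0;			/* free_old_xmit_skbs() */
	q->cb_enabled = true;		/* virtqueue_enable_cb_prepare() */
	cookie = atomic_load(&q->seq);
	pthread_mutex_unlock(&q->lock);	/* __netif_tx_unlock() */

	if (toy_poll(q, cookie))	/* late arrival: don't lose it */
		printf("reschedule napi\n");
}

int main(void)
{
	struct toy_queue q = { .lock = PTHREAD_MUTEX_INITIALIZER };

	toy_add_work(&q);
	toy_poll_tx(&q);	/* nothing slipped in: completes quietly */
	return 0;
}

The toy deliberately omits one detail of the real patch: virtnet_poll_tx() also calls napi_complete_done() under the lock and re-disables callbacks if napi stays scheduled, as the diff below shows.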

@@ -1592,6 +1592,8 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
 	struct virtnet_info *vi = sq->vq->vdev->priv;
 	unsigned int index = vq2txq(sq->vq);
 	struct netdev_queue *txq;
+	int opaque;
+	bool done;
 
 	if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
 		/* We don't need to enable cb for XDP */
@@ -1601,10 +1603,28 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
 
 	txq = netdev_get_tx_queue(vi->dev, index);
 	__netif_tx_lock(txq, raw_smp_processor_id());
+	virtqueue_disable_cb(sq->vq);
 	free_old_xmit_skbs(sq, true);
+
+	opaque = virtqueue_enable_cb_prepare(sq->vq);
+
+	done = napi_complete_done(napi, 0);
+
+	if (!done)
+		virtqueue_disable_cb(sq->vq);
+
 	__netif_tx_unlock(txq);
 
-	virtqueue_napi_complete(napi, sq->vq, 0);
+	if (done) {
+		if (unlikely(virtqueue_poll(sq->vq, opaque))) {
+			if (napi_schedule_prep(napi)) {
+				__netif_tx_lock(txq, raw_smp_processor_id());
+				virtqueue_disable_cb(sq->vq);
+				__netif_tx_unlock(txq);
+				__napi_schedule(napi);
+			}
+		}
+	}
 
 	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
 		netif_tx_wake_queue(txq);
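
Note the shape of the recheck path: virtqueue_poll() itself runs after __netif_tx_unlock(), but if it reports late work, the code retakes the lock just to call virtqueue_disable_cb() before __napi_schedule(). That keeps the lost-interrupt guard that virtqueue_napi_complete() used to provide, while ensuring that every operation on sq->vq, including the callback re-arm in virtqueue_enable_cb_prepare(), now runs with the tx queue lock held and can no longer race with start_xmit() or the opportunistic cleaning from rx napi.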