virtio_ring: Avoid loop when vq is broken in virtqueue_poll
[ Upstream commit 481a0d7422 ]
The loop can occur when vq->broken is true: virtqueue_get_buf_ctx_packed
or virtqueue_get_buf_ctx_split will return NULL, so virtnet_poll
reschedules napi to receive packets, and softirq CPU usage (si) climbs
to 100%.
Call trace as below:
virtnet_poll
    virtnet_receive
        virtqueue_get_buf_ctx
            virtqueue_get_buf_ctx_packed
            virtqueue_get_buf_ctx_split
    virtqueue_napi_complete
        virtqueue_poll              //return true
            virtqueue_napi_schedule //it will reschedule napi
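The reschedule comes from virtio_net's NAPI completion helper: when
virtqueue_poll() returns true, NAPI is scheduled again even though a
broken vq will never yield a buffer. A simplified sketch of that path,
paraphrased from drivers/net/virtio_net.c (not verbatim from this tree):

static void virtqueue_napi_complete(struct napi_struct *napi,
				    struct virtqueue *vq, int processed)
{
	unsigned opaque;

	/* Re-enable callbacks and snapshot the last-used index. */
	opaque = virtqueue_enable_cb_prepare(vq);
	if (napi_complete_done(napi, processed)) {
		/*
		 * A true return from virtqueue_poll() means "more buffers
		 * may be pending", so NAPI gets rescheduled.  On a broken
		 * vq this stayed true while virtqueue_get_buf_ctx() kept
		 * returning NULL, giving the 100% si loop described above.
		 */
		if (unlikely(virtqueue_poll(vq, opaque)))
			virtqueue_napi_schedule(napi, vq);
	} else {
		virtqueue_disable_cb(vq);
	}
}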
To fix this, return false if vq is broken in virtqueue_poll().
Signed-off-by: Mao Wenan <wenan.mao@linux.alibaba.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Link: https://lore.kernel.org/r/1596354249-96204-1-git-send-email-wenan.mao@linux.alibaba.com
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
parent 34f8368f66
commit 05724341d9

1 changed file with 3 additions and 0 deletions
drivers/virtio/virtio_ring.c
@@ -1960,6 +1960,9 @@ bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 
+	if (unlikely(vq->broken))
+		return false;
+
 	virtio_mb(vq->weak_barriers);
 	return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) :
 				 virtqueue_poll_split(_vq, last_used_idx);
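For reference, a reconstructed view of virtqueue_poll() with the check
applied, assembled from the hunk above (the closing brace falls just
outside the hunk's context lines):

bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	/*
	 * A broken vq never has new buffers, so report "nothing pending"
	 * instead of letting callers such as virtqueue_napi_complete()
	 * reschedule forever.
	 */
	if (unlikely(vq->broken))
		return false;

	virtio_mb(vq->weak_barriers);
	return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) :
				 virtqueue_poll_split(_vq, last_used_idx);
}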