mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-10-14 12:37:32 +00:00
netvsc: fix NAPI performance regression
When using NAPI, the single stream performance declined significantly because the poll routine was updating host after every burst of packets. This excess signalling caused host throttling. This fix restores the old behavior. Host is only signalled after the ring has been emptied. Signed-off-by: Stephen Hemminger <sthemmin@microsoft.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
6069f3fbde
commit
f4f1c23d6e
2 changed files with 19 additions and 23 deletions
|
@ -723,6 +723,7 @@ struct net_device_context {
|
||||||
/* Per channel data */
|
/* Per channel data */
|
||||||
struct netvsc_channel {
|
struct netvsc_channel {
|
||||||
struct vmbus_channel *channel;
|
struct vmbus_channel *channel;
|
||||||
|
const struct vmpacket_descriptor *desc;
|
||||||
struct napi_struct napi;
|
struct napi_struct napi;
|
||||||
struct multi_send_data msd;
|
struct multi_send_data msd;
|
||||||
struct multi_recv_comp mrc;
|
struct multi_recv_comp mrc;
|
||||||
|
|
|
@ -1173,7 +1173,6 @@ static int netvsc_process_raw_pkt(struct hv_device *device,
|
||||||
struct vmbus_channel *channel,
|
struct vmbus_channel *channel,
|
||||||
struct netvsc_device *net_device,
|
struct netvsc_device *net_device,
|
||||||
struct net_device *ndev,
|
struct net_device *ndev,
|
||||||
u64 request_id,
|
|
||||||
const struct vmpacket_descriptor *desc)
|
const struct vmpacket_descriptor *desc)
|
||||||
{
|
{
|
||||||
struct net_device_context *net_device_ctx = netdev_priv(ndev);
|
struct net_device_context *net_device_ctx = netdev_priv(ndev);
|
||||||
|
@ -1195,7 +1194,7 @@ static int netvsc_process_raw_pkt(struct hv_device *device,
|
||||||
|
|
||||||
default:
|
default:
|
||||||
netdev_err(ndev, "unhandled packet type %d, tid %llx\n",
|
netdev_err(ndev, "unhandled packet type %d, tid %llx\n",
|
||||||
desc->type, request_id);
|
desc->type, desc->trans_id);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1222,28 +1221,20 @@ int netvsc_poll(struct napi_struct *napi, int budget)
|
||||||
u16 q_idx = channel->offermsg.offer.sub_channel_index;
|
u16 q_idx = channel->offermsg.offer.sub_channel_index;
|
||||||
struct net_device *ndev = hv_get_drvdata(device);
|
struct net_device *ndev = hv_get_drvdata(device);
|
||||||
struct netvsc_device *net_device = net_device_to_netvsc_device(ndev);
|
struct netvsc_device *net_device = net_device_to_netvsc_device(ndev);
|
||||||
const struct vmpacket_descriptor *desc;
|
|
||||||
int work_done = 0;
|
int work_done = 0;
|
||||||
|
|
||||||
desc = hv_pkt_iter_first(channel);
|
/* If starting a new interval */
|
||||||
while (desc) {
|
if (!nvchan->desc)
|
||||||
int count;
|
nvchan->desc = hv_pkt_iter_first(channel);
|
||||||
|
|
||||||
count = netvsc_process_raw_pkt(device, channel, net_device,
|
while (nvchan->desc && work_done < budget) {
|
||||||
ndev, desc->trans_id, desc);
|
work_done += netvsc_process_raw_pkt(device, channel, net_device,
|
||||||
work_done += count;
|
ndev, nvchan->desc);
|
||||||
desc = __hv_pkt_iter_next(channel, desc);
|
nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
|
||||||
|
|
||||||
/* If receive packet budget is exhausted, reschedule */
|
|
||||||
if (work_done >= budget) {
|
|
||||||
work_done = budget;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
hv_pkt_iter_close(channel);
|
|
||||||
|
|
||||||
/* If budget was not exhausted and
|
/* If receive ring was exhausted
|
||||||
* not doing busy poll
|
* and not doing busy poll
|
||||||
* then re-enable host interrupts
|
* then re-enable host interrupts
|
||||||
* and reschedule if ring is not empty.
|
* and reschedule if ring is not empty.
|
||||||
*/
|
*/
|
||||||
|
@ -1253,7 +1244,9 @@ int netvsc_poll(struct napi_struct *napi, int budget)
|
||||||
napi_reschedule(napi);
|
napi_reschedule(napi);
|
||||||
|
|
||||||
netvsc_chk_recv_comp(net_device, channel, q_idx);
|
netvsc_chk_recv_comp(net_device, channel, q_idx);
|
||||||
return work_done;
|
|
||||||
|
/* Driver may overshoot since multiple packets per descriptor */
|
||||||
|
return min(work_done, budget);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Call back when data is available in host ring buffer.
|
/* Call back when data is available in host ring buffer.
|
||||||
|
@ -1263,10 +1256,12 @@ void netvsc_channel_cb(void *context)
|
||||||
{
|
{
|
||||||
struct netvsc_channel *nvchan = context;
|
struct netvsc_channel *nvchan = context;
|
||||||
|
|
||||||
/* disable interupts from host */
|
if (napi_schedule_prep(&nvchan->napi)) {
|
||||||
hv_begin_read(&nvchan->channel->inbound);
|
/* disable interupts from host */
|
||||||
|
hv_begin_read(&nvchan->channel->inbound);
|
||||||
|
|
||||||
napi_schedule(&nvchan->napi);
|
__napi_schedule(&nvchan->napi);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
|
Loading…
Reference in a new issue