Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git (synced 2024-10-30 08:02:30 +00:00)
Merge branch 'hv_netvsc-minor-changes'
Stephen Hemminger says:

====================
hv_netvsc: minor changes

This series includes minor cleanup of code in the send and receive
paths, and adds a new statistic to check for allocation failures. It
also eliminates some of the extra RCU usage where it is not needed.

There is a theoretical bug where buffered data could be blocked for
longer than necessary if the ring buffer got full. This has not been
seen in the wild; it was found by inspection.

The reference count between the net device and the internal RNDIS
device is not needed.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit f4d87ad2a3

4 changed files with 47 additions and 68 deletions
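As a rough illustration of the flow-control fix described in the cover letter, the batching decision in the send path reduces to a single predicate. The standalone sketch below models it with a hypothetical keep_batching() helper rather than the driver's real code; the actual condition appears verbatim in the netvsc_send() hunk further down.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical model of the new batching rule in netvsc_send(): keep
 * aggregating packets in the pending-send buffer only while the stack says
 * more data is coming, the send is not a mixed partial-copy send, and the
 * transmit queue is not flow blocked. A flow-blocked queue is flushed
 * immediately, so buffered data can no longer sit in the send buffer while
 * the ring is full.
 */
static bool keep_batching(bool xmit_more, bool cp_partial, bool queue_stopped)
{
        return xmit_more && !cp_partial && !queue_stopped;
}

int main(void)
{
        /* more data coming, normal copy, queue free -> keep batching */
        printf("%d\n", keep_batching(true, false, false));  /* 1 */
        /* ring buffer full (queue stopped) -> flush now */
        printf("%d\n", keep_batching(true, false, true));   /* 0 */
        return 0;
}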
@@ -194,14 +194,15 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
 				const struct netvsc_device_info *info);
 int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx);
 void netvsc_device_remove(struct hv_device *device);
-int netvsc_send(struct net_device_context *ndc,
+int netvsc_send(struct net_device *net,
 		struct hv_netvsc_packet *packet,
 		struct rndis_message *rndis_msg,
 		struct hv_page_buffer *page_buffer,
 		struct sk_buff *skb);
-void netvsc_linkstatus_callback(struct hv_device *device_obj,
+void netvsc_linkstatus_callback(struct net_device *net,
 				struct rndis_message *resp);
 int netvsc_recv_callback(struct net_device *net,
+			 struct netvsc_device *nvdev,
 			 struct vmbus_channel *channel,
 			 void *data, u32 len,
 			 const struct ndis_tcp_ip_checksum_info *csum_info,

@@ -222,7 +223,6 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev,
 			       const u8 *key);
 int rndis_filter_receive(struct net_device *ndev,
 			 struct netvsc_device *net_dev,
-			 struct hv_device *dev,
 			 struct vmbus_channel *channel,
 			 void *data, u32 buflen);

@@ -705,6 +705,7 @@ struct netvsc_ethtool_stats {
 	unsigned long tx_busy;
 	unsigned long tx_send_full;
 	unsigned long rx_comp_busy;
+	unsigned long rx_no_memory;
 	unsigned long stop_queue;
 	unsigned long wake_queue;
 };

@@ -822,8 +823,6 @@ struct netvsc_device {
 	u32 max_pkt; /* max number of pkt in one send, e.g. 8 */
 	u32 pkt_align; /* alignment bytes, e.g. 8 */
 
-	atomic_t open_cnt;
-
 	struct netvsc_channel chan_table[VRSS_CHANNEL_MAX];
 
 	struct rcu_head rcu;

@@ -73,7 +73,7 @@ static struct netvsc_device *alloc_net_device(void)
 
 	init_waitqueue_head(&net_device->wait_drain);
 	net_device->destroy = false;
-	atomic_set(&net_device->open_cnt, 0);
 	net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
 	net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
 
@@ -701,19 +701,18 @@ static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
 	return NETVSC_INVALID_INDEX;
 }
 
-static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
-				   unsigned int section_index,
-				   u32 pend_size,
-				   struct hv_netvsc_packet *packet,
-				   struct rndis_message *rndis_msg,
-				   struct hv_page_buffer *pb,
-				   struct sk_buff *skb)
+static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
+				    unsigned int section_index,
+				    u32 pend_size,
+				    struct hv_netvsc_packet *packet,
+				    struct rndis_message *rndis_msg,
+				    struct hv_page_buffer *pb,
+				    bool xmit_more)
 {
 	char *start = net_device->send_buf;
 	char *dest = start + (section_index * net_device->send_section_size)
 		     + pend_size;
 	int i;
-	u32 msg_size = 0;
 	u32 padding = 0;
 	u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
 		packet->page_buf_cnt;

@@ -721,7 +720,7 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
 
 	/* Add padding */
 	remain = packet->total_data_buflen & (net_device->pkt_align - 1);
-	if (skb->xmit_more && remain && !packet->cp_partial) {
+	if (xmit_more && remain) {
 		padding = net_device->pkt_align - remain;
 		rndis_msg->msg_len += padding;
 		packet->total_data_buflen += padding;

@@ -733,16 +732,11 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
 		u32 len = pb[i].len;
 
 		memcpy(dest, (src + offset), len);
-		msg_size += len;
 		dest += len;
 	}
 
-	if (padding) {
+	if (padding)
 		memset(dest, 0, padding);
-		msg_size += padding;
-	}
-
-	return msg_size;
 }
 
 static inline int netvsc_send_pkt(
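The padding logic above is easy to model outside the kernel. The following self-contained sketch (a hypothetical pad_to_align() helper, not driver code) shows why a packet is padded up to pkt_align only when another packet will be appended to the same send-buffer section; the value 8 follows the "e.g. 8" comment on pkt_align in the header hunk above.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical standalone model of the padding math in
 * netvsc_copy_to_send_buf(): pkt_align is a power of two, and padding is
 * added only when more data is coming (xmit_more), so the next packet
 * starts on an aligned boundary within the section.
 */
static uint32_t pad_to_align(uint32_t buflen, uint32_t pkt_align, int xmit_more)
{
        uint32_t remain = buflen & (pkt_align - 1);

        if (xmit_more && remain)
                return pkt_align - remain;
        return 0;
}

int main(void)
{
        printf("%u\n", pad_to_align(1514, 8, 1)); /* 6: pad to next 8-byte boundary */
        printf("%u\n", pad_to_align(1514, 8, 0)); /* 0: last packet, no padding */
        return 0;
}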
@@ -835,12 +829,13 @@ static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send,
 }
 
 /* RCU already held by caller */
-int netvsc_send(struct net_device_context *ndev_ctx,
+int netvsc_send(struct net_device *ndev,
 		struct hv_netvsc_packet *packet,
 		struct rndis_message *rndis_msg,
 		struct hv_page_buffer *pb,
 		struct sk_buff *skb)
 {
+	struct net_device_context *ndev_ctx = netdev_priv(ndev);
 	struct netvsc_device *net_device
 		= rcu_dereference_bh(ndev_ctx->nvdev);
 	struct hv_device *device = ndev_ctx->device_ctx;

@@ -851,7 +846,7 @@ int netvsc_send(struct net_device_context *ndev_ctx,
 	struct multi_send_data *msdp;
 	struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
 	struct sk_buff *msd_skb = NULL;
-	bool try_batch;
+	bool try_batch, xmit_more;
 
 	/* If device is rescinded, return error and packet will get dropped. */
 	if (unlikely(!net_device || net_device->destroy))

@@ -902,10 +897,17 @@ int netvsc_send(struct net_device_context *ndev_ctx,
 		}
 	}
 
+	/* Keep aggregating only if stack says more data is coming
+	 * and not doing mixed modes send and not flow blocked
+	 */
+	xmit_more = skb->xmit_more &&
+		!packet->cp_partial &&
+		!netif_xmit_stopped(netdev_get_tx_queue(ndev, packet->q_idx));
+
 	if (section_index != NETVSC_INVALID_INDEX) {
 		netvsc_copy_to_send_buf(net_device,
 					section_index, msd_len,
-					packet, rndis_msg, pb, skb);
+					packet, rndis_msg, pb, xmit_more);
 
 		packet->send_buf_index = section_index;
 
@@ -925,7 +927,7 @@ int netvsc_send(struct net_device_context *ndev_ctx,
 		if (msdp->skb)
 			dev_consume_skb_any(msdp->skb);
 
-		if (skb->xmit_more && !packet->cp_partial) {
+		if (xmit_more) {
 			msdp->skb = skb;
 			msdp->pkt = packet;
 			msdp->count++;

@@ -1088,7 +1090,7 @@ static int netvsc_receive(struct net_device *ndev,
 		u32 buflen = vmxferpage_packet->ranges[i].byte_count;
 
 		/* Pass it to the upper layer */
-		status = rndis_filter_receive(ndev, net_device, device,
+		status = rndis_filter_receive(ndev, net_device,
 					      channel, data, buflen);
 	}
 

@@ -626,7 +626,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
 	/* timestamp packet in software */
 	skb_tx_timestamp(skb);
 
-	ret = netvsc_send(net_device_ctx, packet, rndis_msg, pb, skb);
+	ret = netvsc_send(net, packet, rndis_msg, pb, skb);
 	if (likely(ret == 0))
 		return NETDEV_TX_OK;
 

@@ -652,22 +652,14 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
 /*
  * netvsc_linkstatus_callback - Link up/down notification
  */
-void netvsc_linkstatus_callback(struct hv_device *device_obj,
+void netvsc_linkstatus_callback(struct net_device *net,
 				struct rndis_message *resp)
 {
 	struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
-	struct net_device *net;
-	struct net_device_context *ndev_ctx;
+	struct net_device_context *ndev_ctx = netdev_priv(net);
 	struct netvsc_reconfig *event;
 	unsigned long flags;
 
-	net = hv_get_drvdata(device_obj);
-
-	if (!net)
-		return;
-
-	ndev_ctx = netdev_priv(net);
-
 	/* Update the physical link speed when changing to another vSwitch */
 	if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) {
 		u32 speed;
@@ -747,34 +739,26 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
  * "wire" on the specified device.
  */
 int netvsc_recv_callback(struct net_device *net,
+			 struct netvsc_device *net_device,
 			 struct vmbus_channel *channel,
 			 void *data, u32 len,
 			 const struct ndis_tcp_ip_checksum_info *csum_info,
 			 const struct ndis_pkt_8021q_info *vlan)
 {
 	struct net_device_context *net_device_ctx = netdev_priv(net);
-	struct netvsc_device *net_device;
 	u16 q_idx = channel->offermsg.offer.sub_channel_index;
-	struct netvsc_channel *nvchan;
+	struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
 	struct sk_buff *skb;
 	struct netvsc_stats *rx_stats;
 
 	if (net->reg_state != NETREG_REGISTERED)
 		return NVSP_STAT_FAIL;
 
-	rcu_read_lock();
-	net_device = rcu_dereference(net_device_ctx->nvdev);
-	if (unlikely(!net_device))
-		goto drop;
-
-	nvchan = &net_device->chan_table[q_idx];
-
 	/* Allocate a skb - TODO direct I/O to pages? */
 	skb = netvsc_alloc_recv_skb(net, &nvchan->napi,
 				    csum_info, vlan, data, len);
 	if (unlikely(!skb)) {
-drop:
 		++net->stats.rx_dropped;
-		rcu_read_unlock();
+		++net_device_ctx->eth_stats.rx_no_memory;
 		return NVSP_STAT_FAIL;
 	}

@@ -798,8 +782,6 @@ int netvsc_recv_callback(struct net_device *net,
 	u64_stats_update_end(&rx_stats->syncp);
 
 	napi_gro_receive(&nvchan->napi, skb);
-	rcu_read_unlock();
-
 	return 0;
 }
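With this change the netvsc_device pointer is handed down from netvsc_receive() through rndis_filter_receive() and rndis_filter_receive_data(), so the receive callback no longer takes its own per-packet rcu_read_lock()/rcu_dereference() pair; that is the "extra RCU" elimination mentioned in the cover letter. A rough standalone model of the resulting shape, with hypothetical demo_* names (not driver code):

#include <stdio.h>

/* Hypothetical model: the outer per-channel handler resolves the device
 * state once and passes the plain pointer to the per-packet helper, rather
 * than each packet re-resolving (and re-locking) it.
 */
struct demo_device { unsigned long rx_packets; };

static void demo_recv_one(struct demo_device *dev, unsigned int len)
{
        (void)len;
        dev->rx_packets++;              /* per-packet work, no extra lookup */
}

static void demo_recv_burst(struct demo_device *dev,
                            const unsigned int *lens, unsigned int n)
{
        /* device resolved once per burst, outside the packet loop */
        for (unsigned int i = 0; i < n; i++)
                demo_recv_one(dev, lens[i]);
}

int main(void)
{
        struct demo_device dev = { 0 };
        const unsigned int lens[] = { 64, 1514, 512 };

        demo_recv_burst(&dev, lens, 3);
        printf("received %lu packets\n", dev.rx_packets);
        return 0;
}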
@@ -1125,12 +1107,13 @@ static const struct {
 	u16 offset;
 } netvsc_stats[] = {
 	{ "tx_scattered", offsetof(struct netvsc_ethtool_stats, tx_scattered) },
 	{ "tx_no_memory", offsetof(struct netvsc_ethtool_stats, tx_no_memory) },
 	{ "tx_no_space", offsetof(struct netvsc_ethtool_stats, tx_no_space) },
 	{ "tx_too_big", offsetof(struct netvsc_ethtool_stats, tx_too_big) },
 	{ "tx_busy", offsetof(struct netvsc_ethtool_stats, tx_busy) },
 	{ "tx_send_full", offsetof(struct netvsc_ethtool_stats, tx_send_full) },
 	{ "rx_comp_busy", offsetof(struct netvsc_ethtool_stats, rx_comp_busy) },
+	{ "rx_no_memory", offsetof(struct netvsc_ethtool_stats, rx_no_memory) },
 	{ "stop_queue", offsetof(struct netvsc_ethtool_stats, stop_queue) },
 	{ "wake_queue", offsetof(struct netvsc_ethtool_stats, wake_queue) },
 }, vf_stats[] = {
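The table above is the usual offsetof()-based ethtool-stats pattern: each entry pairs a display name with the counter's offset inside struct netvsc_ethtool_stats, so one loop can emit every counter. A self-contained sketch of the same idea, using hypothetical demo_* names rather than the driver's types:

#include <stddef.h>
#include <stdio.h>

/* Hypothetical model of the name + offsetof() stats table: adding a counter
 * (like rx_no_memory above) only requires a struct field and one table row. */
struct demo_stats {
        unsigned long rx_comp_busy;
        unsigned long rx_no_memory;
};

static const struct {
        const char *name;
        size_t offset;
} demo_desc[] = {
        { "rx_comp_busy", offsetof(struct demo_stats, rx_comp_busy) },
        { "rx_no_memory", offsetof(struct demo_stats, rx_no_memory) },
};

int main(void)
{
        struct demo_stats s = { .rx_comp_busy = 1, .rx_no_memory = 2 };
        const char *base = (const char *)&s;
        size_t i;

        /* walk the table instead of open-coding every counter */
        for (i = 0; i < sizeof(demo_desc) / sizeof(demo_desc[0]); i++)
                printf("%s: %lu\n", demo_desc[i].name,
                       *(const unsigned long *)(base + demo_desc[i].offset));
        return 0;
}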
@@ -134,11 +134,9 @@ static void put_rndis_request(struct rndis_device *dev,
 	kfree(req);
 }
 
-static void dump_rndis_message(struct hv_device *hv_dev,
+static void dump_rndis_message(struct net_device *netdev,
 			       const struct rndis_message *rndis_msg)
 {
-	struct net_device *netdev = hv_get_drvdata(hv_dev);
-
 	switch (rndis_msg->ndis_msg_type) {
 	case RNDIS_MSG_PACKET:
 		netdev_dbg(netdev, "RNDIS_MSG_PACKET (len %u, "

@@ -217,7 +215,6 @@ static int rndis_filter_send_request(struct rndis_device *dev,
 	struct hv_netvsc_packet *packet;
 	struct hv_page_buffer page_buf[2];
 	struct hv_page_buffer *pb = page_buf;
-	struct net_device_context *net_device_ctx = netdev_priv(dev->ndev);
 	int ret;
 
 	/* Setup the packet to send it */

@@ -245,7 +242,7 @@ static int rndis_filter_send_request(struct rndis_device *dev,
 	}
 
 	rcu_read_lock_bh();
-	ret = netvsc_send(net_device_ctx, packet, NULL, pb, NULL);
+	ret = netvsc_send(dev->ndev, packet, NULL, pb, NULL);
 	rcu_read_unlock_bh();
 
 	return ret;

@@ -354,6 +351,7 @@ static inline void *rndis_get_ppi(struct rndis_packet *rpkt, u32 type)
 }
 
 static int rndis_filter_receive_data(struct net_device *ndev,
+				     struct netvsc_device *nvdev,
 				     struct rndis_device *dev,
 				     struct rndis_message *msg,
 				     struct vmbus_channel *channel,

@@ -390,14 +388,14 @@ static int rndis_filter_receive_data(struct net_device *ndev,
 	 */
 	data = (void *)((unsigned long)data + data_offset);
 	csum_info = rndis_get_ppi(rndis_pkt, TCPIP_CHKSUM_PKTINFO);
-	return netvsc_recv_callback(ndev, channel,
+
+	return netvsc_recv_callback(ndev, nvdev, channel,
 				    data, rndis_pkt->data_len,
 				    csum_info, vlan);
 }
 
 int rndis_filter_receive(struct net_device *ndev,
 			 struct netvsc_device *net_dev,
-			 struct hv_device *dev,
 			 struct vmbus_channel *channel,
 			 void *data, u32 buflen)
 {

@@ -419,11 +417,12 @@ int rndis_filter_receive(struct net_device *ndev,
 	}
 
 	if (netif_msg_rx_status(net_device_ctx))
-		dump_rndis_message(dev, rndis_msg);
+		dump_rndis_message(ndev, rndis_msg);
 
 	switch (rndis_msg->ndis_msg_type) {
 	case RNDIS_MSG_PACKET:
-		return rndis_filter_receive_data(ndev, rndis_dev, rndis_msg,
+		return rndis_filter_receive_data(ndev, net_dev,
+						 rndis_dev, rndis_msg,
 						 channel, data, buflen);
 	case RNDIS_MSG_INIT_C:
 	case RNDIS_MSG_QUERY_C:

@@ -434,7 +433,7 @@ int rndis_filter_receive(struct net_device *ndev,
 
 	case RNDIS_MSG_INDICATE:
 		/* notification msgs */
-		netvsc_linkstatus_callback(dev, rndis_msg);
+		netvsc_linkstatus_callback(ndev, rndis_msg);
 		break;
 	default:
 		netdev_err(ndev,

@@ -1362,9 +1361,6 @@ int rndis_filter_open(struct netvsc_device *nvdev)
 	if (!nvdev)
 		return -EINVAL;
 
-	if (atomic_inc_return(&nvdev->open_cnt) != 1)
-		return 0;
-
 	return rndis_filter_open_device(nvdev->extension);
 }
 

@@ -1373,13 +1369,12 @@ int rndis_filter_close(struct netvsc_device *nvdev)
 	if (!nvdev)
 		return -EINVAL;
 
-	if (atomic_dec_return(&nvdev->open_cnt) != 0)
-		return 0;
-
 	return rndis_filter_close_device(nvdev->extension);
 }
 
 bool rndis_filter_opened(const struct netvsc_device *nvdev)
 {
-	return atomic_read(&nvdev->open_cnt) > 0;
+	const struct rndis_device *dev = nvdev->extension;
+
+	return dev->state == RNDIS_DEV_DATAINITIALIZED;
 }
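With open_cnt gone, "is the filter open" is derived from the RNDIS device's own state instead of a separate counter that could drift out of sync with it. A minimal standalone sketch of that idea, using a hypothetical demo_* enum rather than the driver's RNDIS_DEV_* states:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical model: the open/closed question is answered from the device
 * state machine itself, so there is no reference count to keep in sync
 * (the point of dropping open_cnt in this series). */
enum demo_state { DEMO_UNINITIALIZED, DEMO_INITIALIZED, DEMO_DATAINITIALIZED };

static bool demo_opened(enum demo_state state)
{
        return state == DEMO_DATAINITIALIZED;
}

int main(void)
{
        printf("%d\n", demo_opened(DEMO_INITIALIZED));      /* 0: not open */
        printf("%d\n", demo_opened(DEMO_DATAINITIALIZED));  /* 1: data path up */
        return 0;
}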