Merge git://github.com/davem330/net

* git://github.com/davem330/net:
  net: fix typos in Documentation/networking/scaling.txt
  bridge: leave carrier on for empty bridge
  netfilter: Use proper rwlock init function
  tcp: properly update lost_cnt_hint during shifting
  tcp: properly handle md5sig_pool references
  macvlan/macvtap: Fix unicast between macvtap interfaces in bridge mode
Linus Torvalds, 2011-10-06 16:15:10 -07:00
commit 3ee72ca992
7 changed files with 20 additions and 20 deletions

Documentation/networking/scaling.txt

@@ -27,7 +27,7 @@ applying a filter to each packet that assigns it to one of a small number
 of logical flows. Packets for each flow are steered to a separate receive
 queue, which in turn can be processed by separate CPUs. This mechanism is
 generally known as “Receive-side Scaling” (RSS). The goal of RSS and
-the other scaling techniques to increase performance uniformly.
+the other scaling techniques is to increase performance uniformly.
 Multi-queue distribution can also be used for traffic prioritization, but
 that is not the focus of these techniques.

@@ -186,10 +186,10 @@ are steered using plain RPS. Multiple table entries may point to the
 same CPU. Indeed, with many flows and few CPUs, it is very likely that
 a single application thread handles flows with many different flow hashes.

-rps_sock_table is a global flow table that contains the *desired* CPU for
-flows: the CPU that is currently processing the flow in userspace. Each
-table value is a CPU index that is updated during calls to recvmsg and
-sendmsg (specifically, inet_recvmsg(), inet_sendmsg(), inet_sendpage()
+rps_sock_flow_table is a global flow table that contains the *desired* CPU
+for flows: the CPU that is currently processing the flow in userspace.
+Each table value is a CPU index that is updated during calls to recvmsg
+and sendmsg (specifically, inet_recvmsg(), inet_sendmsg(), inet_sendpage()
 and tcp_splice_read()).

 When the scheduler moves a thread to a new CPU while it has outstanding

drivers/net/macvlan.c

@@ -239,7 +239,7 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
 		dest = macvlan_hash_lookup(port, eth->h_dest);
 		if (dest && dest->mode == MACVLAN_MODE_BRIDGE) {
 			/* send to lowerdev first for its network taps */
-			vlan->forward(vlan->lowerdev, skb);
+			dev_forward_skb(vlan->lowerdev, skb);

 			return NET_XMIT_SUCCESS;
 		}
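
Note on this fix: vlan->forward is a per-type hook; plain macvlan points it at dev_forward_skb(), but macvtap points it at its own queueing handler, which assumes the device argument is the macvtap itself, so calling it with lowerdev misdelivered the copy meant for the lower device's taps. Calling dev_forward_skb() directly works for any net_device. A minimal sketch of the fixed path (dev_forward_skb() is the real kernel helper; the wrapper function is illustrative only):

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	/* Illustrative only: hand the frame to the lower device's receive
	 * path so packet taps bound to lowerdev observe it. Unlike the old
	 * vlan->forward hook, dev_forward_skb() does not care whether the
	 * sender is a macvlan or a macvtap. */
	static void copy_to_lower_rx(struct net_device *lowerdev,
				     struct sk_buff *skb)
	{
		dev_forward_skb(lowerdev, skb);
	}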

net/bridge/br_device.c

@@ -91,7 +91,6 @@ static int br_dev_open(struct net_device *dev)
 {
 	struct net_bridge *br = netdev_priv(dev);

-	netif_carrier_off(dev);
 	netdev_update_features(dev);
 	netif_start_queue(dev);
 	br_stp_enable_bridge(br);
@@ -108,8 +107,6 @@ static int br_dev_stop(struct net_device *dev)
 {
 	struct net_bridge *br = netdev_priv(dev);

-	netif_carrier_off(dev);
-
 	br_stp_disable_bridge(br);
 	br_multicast_stop(br);

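
Note on this fix: netif_carrier_off() propagates to the device's operstate, so a bridge with no ports never reported carrier and userspace (DHCP clients, IPv6 addrconf) treated it as a dead link. Dropping both calls leaves carrier asserted on an empty bridge so it can carry an address by itself. A hedged sketch of what the open path looks like without the forced carrier drop (real netdevice helpers, illustrative function):

	#include <linux/netdevice.h>

	/* Illustrative only: carrier state is what ip(8) reports as
	 * NO-CARRIER and what address-configuration daemons key off.
	 * With no netif_carrier_off() here, a portless bridge comes up
	 * usable as a stand-alone device. */
	static void example_open(struct net_device *dev)
	{
		netdev_update_features(dev);
		netif_start_queue(dev);
	}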

net/ipv4/tcp_input.c

@@ -1389,9 +1389,7 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,

 	BUG_ON(!pcount);

-	/* Tweak before seqno plays */
-	if (!tcp_is_fack(tp) && tcp_is_sack(tp) && tp->lost_skb_hint &&
-	    !before(TCP_SKB_CB(tp->lost_skb_hint)->seq, TCP_SKB_CB(skb)->seq))
+	if (skb == tp->lost_skb_hint)
 		tp->lost_cnt_hint += pcount;

 	TCP_SKB_CB(prev)->end_seq += shifted;
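
Note on this fix: tp->lost_cnt_hint counts the packets that sit before tp->lost_skb_hint in the retransmit queue. SACK shifting moves pcount packets from skb into prev, and prev always precedes skb, so the count only changes when skb is the hint itself: only then do the shifted packets cross the boundary the hint marks. The old seqno test also fired when the hint lay strictly after skb, over-counting. A toy model of the invariant (plain C with hypothetical types, not kernel code):

	struct pkt { int count; };	/* stand-in for a queued skb */

	/* Shift pcount packets from skb into prev (prev precedes skb),
	 * keeping lost_cnt_hint equal to the packet count before hint. */
	static void shift(struct pkt *prev, struct pkt *skb, int pcount,
			  const struct pkt *hint, int *lost_cnt_hint)
	{
		prev->count += pcount;
		skb->count -= pcount;
		if (skb == hint)	/* packets moved across the hint */
			*lost_cnt_hint += pcount;
		/* skb before hint: already counted; after hint: still not */
	}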

net/ipv4/tcp_ipv4.c

@@ -927,18 +927,21 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
 			}
 			sk_nocaps_add(sk, NETIF_F_GSO_MASK);
 		}
-		if (tcp_alloc_md5sig_pool(sk) == NULL) {
+
+		md5sig = tp->md5sig_info;
+		if (md5sig->entries4 == 0 &&
+		    tcp_alloc_md5sig_pool(sk) == NULL) {
 			kfree(newkey);
 			return -ENOMEM;
 		}
-		md5sig = tp->md5sig_info;

 		if (md5sig->alloced4 == md5sig->entries4) {
 			keys = kmalloc((sizeof(*keys) *
 					(md5sig->entries4 + 1)), GFP_ATOMIC);
 			if (!keys) {
 				kfree(newkey);
-				tcp_free_md5sig_pool();
+				if (md5sig->entries4 == 0)
+					tcp_free_md5sig_pool();
 				return -ENOMEM;
 			}

@@ -982,6 +985,7 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
 				kfree(tp->md5sig_info->keys4);
 				tp->md5sig_info->keys4 = NULL;
 				tp->md5sig_info->alloced4 = 0;
+				tcp_free_md5sig_pool();
 			} else if (tp->md5sig_info->entries4 != i) {
 				/* Need to do some manipulation */
 				memmove(&tp->md5sig_info->keys4[i],
@@ -989,7 +993,6 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
 					(tp->md5sig_info->entries4 - i) *
 					 sizeof(struct tcp4_md5sig_key));
 			}
-			tcp_free_md5sig_pool();
 			return 0;
 		}
 	}
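
Note on the md5sig_pool changes: tcp_alloc_md5sig_pool() and tcp_free_md5sig_pool() maintain a global user count on the shared per-CPU pool. The old code allocated on every key add and freed on every delete, which left that count unbalanced on some paths; after the patch a socket holds exactly one reference for its whole key array, taken when the first key is added (entries4 == 0) and dropped only when the key count returns to zero. A hedged sketch of the pattern, with illustrative names standing in for the kernel calls:

	/* Illustrative refcount-on-first/last pattern; pool_get() and
	 * pool_put() stand in for tcp_alloc_md5sig_pool() and
	 * tcp_free_md5sig_pool(). */
	struct keyring { int nkeys; };

	extern void *pool_get(void);
	extern void pool_put(void);

	static int keyring_add(struct keyring *kr)
	{
		if (kr->nkeys == 0 && pool_get() == NULL)
			return -1;	/* first key: take the one reference */
		kr->nkeys++;
		return 0;
	}

	static void keyring_del(struct keyring *kr)
	{
		if (--kr->nkeys == 0)
			pool_put();	/* last key: drop the one reference */
	}

The tcp_ipv6.c hunks that follow apply the same rule to entries6.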

net/ipv6/tcp_ipv6.c

@@ -591,7 +591,8 @@ static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer,
 			}
 			sk_nocaps_add(sk, NETIF_F_GSO_MASK);
 		}
-		if (tcp_alloc_md5sig_pool(sk) == NULL) {
+		if (tp->md5sig_info->entries6 == 0 &&
+		    tcp_alloc_md5sig_pool(sk) == NULL) {
 			kfree(newkey);
 			return -ENOMEM;
 		}
@@ -600,8 +601,9 @@ static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer,
 				       (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);

 			if (!keys) {
-				tcp_free_md5sig_pool();
 				kfree(newkey);
+				if (tp->md5sig_info->entries6 == 0)
+					tcp_free_md5sig_pool();
 				return -ENOMEM;
 			}

@@ -647,6 +649,7 @@ static int tcp_v6_md5_do_del(struct sock *sk, const struct in6_addr *peer)
 				kfree(tp->md5sig_info->keys6);
 				tp->md5sig_info->keys6 = NULL;
 				tp->md5sig_info->alloced6 = 0;
+				tcp_free_md5sig_pool();
 			} else {
 				/* shrink the database */
 				if (tp->md5sig_info->entries6 != i)
@@ -655,7 +658,6 @@ static int tcp_v6_md5_do_del(struct sock *sk, const struct in6_addr *peer)
 						(tp->md5sig_info->entries6 - i)
 						* sizeof (tp->md5sig_info->keys6[0]));
 			}
-			tcp_free_md5sig_pool();
 			return 0;
 		}
 	}

net/netfilter/ipvs/ip_vs_ctl.c

@@ -3679,7 +3679,7 @@ int __net_init ip_vs_control_net_init(struct net *net)
 	int idx;
 	struct netns_ipvs *ipvs = net_ipvs(net);

-	ipvs->rs_lock = __RW_LOCK_UNLOCKED(ipvs->rs_lock);
+	rwlock_init(&ipvs->rs_lock);

 	/* Initialize rs_table */
 	for (idx = 0; idx < IP_VS_RTAB_SIZE; idx++)
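
Note on this fix: __RW_LOCK_UNLOCKED() is a static initializer meant for locks defined at compile time. The per-netns ipvs state is dynamically allocated, so the lock must be set up at runtime with rwlock_init(), which also registers a proper lockdep class for it. A short sketch of the two idioms (standard kernel API; the struct and functions are illustrative):

	#include <linux/spinlock.h>

	/* Compile-time lock: the initializer macro form is correct here. */
	static DEFINE_RWLOCK(static_lock);

	/* Lock embedded in dynamically allocated state (like netns_ipvs):
	 * initialize at runtime rather than assigning the static
	 * initializer into live memory. */
	struct my_state { rwlock_t lock; };

	static void my_state_init(struct my_state *s)
	{
		rwlock_init(&s->lock);
	}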