linux-stable/drivers/net/loopback.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *      INET    An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system. INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Pseudo-driver for the loopback interface.
 *
 * Version:     @(#)loopback.c  1.0.4b  08/16/93
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Donald Becker, <becker@scyld.com>
 *
 *              Alan Cox        : Fixed oddments for NET3.014
 *              Alan Cox        : Rejig for NET3.029 snap #3
 *              Alan Cox        : Fixed NET3.029 bugs and sped up
 *              Larry McVoy     : Tiny tweak to double performance
 *              Alan Cox        : Backed out LMV's tweak - the linux mm
 *                                can't take it...
 *              Michael Griffith: Don't bother computing the checksums
 *                                on packets received on the loopback
 *                                interface.
 *              Alexey Kuznetsov: Potential hang under some extreme
 *                                cases removed.
 */
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/in.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <net/sch_generic.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <linux/if_ether.h> /* For the statistics structure. */
#include <linux/if_arp.h> /* For ARPHRD_ETHER */
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/percpu.h>
#include <linux/net_tstamp.h>
#include <net/net_namespace.h>
#include <linux/u64_stats_sync.h>

/* blackhole_netdev - a device used for dsts that are marked expired!
 * This is a global device (instead of per-net-ns) since it does not need
 * to be per-ns and gets initialized at boot time.
 */
struct net_device *blackhole_netdev;
EXPORT_SYMBOL(blackhole_netdev);
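
/* For reference (a sketch of the typical caller, not defined in this file):
 * when a dst is being invalidated, dst_dev_put() in net/core/dst.c re-points
 * the dying entry at this device, roughly:
 *
 *      dst->input  = dst_discard;
 *      dst->output = dst_discard_out;
 *      dst->dev    = blackhole_netdev;
 *
 * so packets still holding the stale dst end up in blackhole_netdev_xmit()
 * below and are dropped with a rate-limited warning, instead of touching a
 * device that may already be gone.
 */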

/* The higher levels take care of making this non-reentrant (it's
 * called with bh's disabled).
 */
static netdev_tx_t loopback_xmit(struct sk_buff *skb,
                                 struct net_device *dev)
{
        int len;

        skb_tx_timestamp(skb);

        /* do not fool net_timestamp_check() with various clock bases */
        skb_clear_tstamp(skb);

        skb_orphan(skb);

        /* Before queueing this packet to __netif_rx(),
         * make sure dst is refcounted.
         */
        skb_dst_force(skb);

        skb->protocol = eth_type_trans(skb, dev);

        len = skb->len;
        if (likely(__netif_rx(skb) == NET_RX_SUCCESS))
                dev_lstats_add(dev, len);

        return NETDEV_TX_OK;
}
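
/* Fold the per-CPU pcpu_lstats counters into a single packet/byte total.
 * The u64_stats_fetch_begin()/u64_stats_fetch_retry() loop re-reads a CPU's
 * counters if they were updated concurrently, so the packet/byte pair stays
 * consistent even on 32-bit kernels where 64-bit counters are not updated
 * atomically.
 */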
void dev_lstats_read(struct net_device *dev, u64 *packets, u64 *bytes)
{
        int i;

        *packets = 0;
        *bytes = 0;

        for_each_possible_cpu(i) {
                const struct pcpu_lstats *lb_stats;
                u64 tbytes, tpackets;
                unsigned int start;

                lb_stats = per_cpu_ptr(dev->lstats, i);
                do {
                        start = u64_stats_fetch_begin(&lb_stats->syncp);
                        tpackets = u64_stats_read(&lb_stats->packets);
                        tbytes = u64_stats_read(&lb_stats->bytes);
                } while (u64_stats_fetch_retry(&lb_stats->syncp, start));
                *bytes += tbytes;
                *packets += tpackets;
        }
}
EXPORT_SYMBOL(dev_lstats_read);
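
/* Loopback counts each packet once, at transmit time, and every packet
 * transmitted on lo is delivered straight back to lo, so the same totals
 * are reported for both the RX and TX sides.
 */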
static void loopback_get_stats64(struct net_device *dev,
                                 struct rtnl_link_stats64 *stats)
{
        u64 packets, bytes;

        dev_lstats_read(dev, &packets, &bytes);

        stats->rx_packets = packets;
        stats->tx_packets = packets;
        stats->rx_bytes = bytes;
        stats->tx_bytes = bytes;
}
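
/* The loopback link can never go down, so always report "link up" to ethtool. */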
static u32 always_on(struct net_device *dev)
{
        return 1;
}

static const struct ethtool_ops loopback_ethtool_ops = {
        .get_link               = always_on,
        .get_ts_info            = ethtool_op_get_ts_info,
};
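
/* ndo_init: allocate the per-CPU packet/byte counters that loopback_xmit()
 * updates via dev_lstats_add().
 */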
static int loopback_dev_init(struct net_device *dev)
{
        dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
        if (!dev->lstats)
                return -ENOMEM;
        return 0;
}

static void loopback_dev_free(struct net_device *dev)
{
        dev_net(dev)->loopback_dev = NULL;
        free_percpu(dev->lstats);
}

static const struct net_device_ops loopback_ops = {
        .ndo_init            = loopback_dev_init,
        .ndo_start_xmit      = loopback_xmit,
        .ndo_get_stats64     = loopback_get_stats64,
        .ndo_set_mac_address = eth_mac_addr,
};
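
/* Common setup shared by the loopback and blackhole devices: both are
 * queue-less, Ethernet-framed, namespace-local software devices; they
 * differ only in MTU, ethtool/header/netdev ops and destructor, all of
 * which are passed in here.
 */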
static void gen_lo_setup(struct net_device *dev,
                         unsigned int mtu,
                         const struct ethtool_ops *eth_ops,
                         const struct header_ops *hdr_ops,
                         const struct net_device_ops *dev_ops,
                         void (*dev_destructor)(struct net_device *dev))
{
        dev->mtu                = mtu;
        dev->hard_header_len    = ETH_HLEN;     /* 14 */
        dev->min_header_len     = ETH_HLEN;     /* 14 */
        dev->addr_len           = ETH_ALEN;     /* 6 */
        dev->type               = ARPHRD_LOOPBACK;      /* 0x0001 */
        dev->flags              = IFF_LOOPBACK;
        dev->priv_flags         |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
        netif_keep_dst(dev);
        dev->hw_features        = NETIF_F_GSO_SOFTWARE;
        dev->features           = NETIF_F_SG | NETIF_F_FRAGLIST
                | NETIF_F_GSO_SOFTWARE
                | NETIF_F_HW_CSUM
                | NETIF_F_RXCSUM
                | NETIF_F_SCTP_CRC
                | NETIF_F_HIGHDMA
                | NETIF_F_LLTX
                | NETIF_F_NETNS_LOCAL
                | NETIF_F_VLAN_CHALLENGED
                | NETIF_F_LOOPBACK;
        dev->ethtool_ops        = eth_ops;
        dev->header_ops         = hdr_ops;
        dev->netdev_ops         = dev_ops;
        dev->needs_free_netdev  = true;
        dev->priv_destructor    = dev_destructor;

        /* Allow the admin to raise the GSO limit on lo up to GSO_MAX_SIZE
         * (512 KB), e.g. "ip link set dev lo gso_max_size 200000", so that
         * BIG TCP sized packets can be built over loopback.
         */
        netif_set_tso_max_size(dev, GSO_MAX_SIZE);
}

/* The loopback device is special. There is only one instance
 * per network namespace.
 */
static void loopback_setup(struct net_device *dev)
{
        gen_lo_setup(dev, (64 * 1024), &loopback_ethtool_ops, &eth_header_ops,
                     &loopback_ops, loopback_dev_free);
}

/* Setup and register the loopback device. */
static __net_init int loopback_net_init(struct net *net)
{
        struct net_device *dev;
        int err;

        err = -ENOMEM;
        dev = alloc_netdev(0, "lo", NET_NAME_PREDICTABLE, loopback_setup);
        if (!dev)
                goto out;

        dev_net_set(dev, net);
        err = register_netdev(dev);
        if (err)
                goto out_free_netdev;

        BUG_ON(dev->ifindex != LOOPBACK_IFINDEX);
        net->loopback_dev = dev;
        return 0;

out_free_netdev:
        free_netdev(dev);
out:
        if (net_eq(net, &init_net))
                panic("loopback: Failed to register netdevice: %d\n", err);
        return err;
}

/* Registered in net/core/dev.c */
struct pernet_operations __net_initdata loopback_net_ops = {
        .init = loopback_net_init,
};

/* blackhole netdevice */
static netdev_tx_t blackhole_netdev_xmit(struct sk_buff *skb,
                                         struct net_device *dev)
{
        kfree_skb(skb);
        net_warn_ratelimited("%s(): Dropping skb.\n", __func__);
        return NETDEV_TX_OK;
}

static const struct net_device_ops blackhole_netdev_ops = {
        .ndo_start_xmit = blackhole_netdev_xmit,
};

/* This is a dst-dummy device used specifically for invalidated
 * DSTs and unlike loopback, this is not per-ns.
 */
static void blackhole_netdev_setup(struct net_device *dev)
{
        gen_lo_setup(dev, ETH_MIN_MTU, NULL, NULL, &blackhole_netdev_ops, NULL);
}

/* Setup and register the blackhole_netdev. */
static int __init blackhole_netdev_init(void)
{
        blackhole_netdev = alloc_netdev(0, "blackhole_dev", NET_NAME_UNKNOWN,
                                        blackhole_netdev_setup);
        if (!blackhole_netdev)
                return -ENOMEM;

        rtnl_lock();
        dev_init_scheduler(blackhole_netdev);
        dev_activate(blackhole_netdev);
        rtnl_unlock();

        blackhole_netdev->flags |= IFF_UP | IFF_RUNNING;
        dev_net_set(blackhole_netdev, &init_net);

        return 0;
}

device_initcall(blackhole_netdev_init);