Merge branch 'net-group-together-hot-data'

Eric Dumazet says:

====================
net: group together hot data

While our recent structure reorganizations were focused
on increasing max throughput, there is still an
area where improvements are much needed.

In many cases, a cpu handles one packet at a time,
instead of a nice batch.

Hardware interrupt.
 -> Software interrupt.
   -> Network/Protocol stacks.

If the cpu was idle or busy in other layers,
it has to pull many cache lines.

This series adds a new net_hotdata structure, where
some critical (and read-mostly) data used in
rx and tx path is packed in a small number of cache lines.

Synthetic benchmarks will not see much difference,
but the latency of a single packet should improve.

net_hotdata current size on 64bit is 416 bytes,
but might grow in the future.

Also move RPS definitions to a new include file.
====================

Link: https://lore.kernel.org/r/20240306160031.874438-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Committed by Jakub Kicinski on 2024-03-07 21:12:45 -08:00 as commit e8bb2ccff7.
44 changed files with 391 additions and 320 deletions
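
As a brief editorial illustration of the idea in the cover letter (a userspace sketch, not part of this series; the field subset and names below are made up): packing a handful of read-mostly knobs into one cache-line-aligned structure lets a CPU that wakes up for a single packet pull one cache line instead of one line per scattered __read_mostly global. At 416 bytes, the real net_hotdata introduced below spans 7 cache lines of 64 bytes.

/* Userspace sketch only; the real structure is struct net_hotdata below. */
#include <stdio.h>

#define CACHE_LINE 64

struct hot_knobs {				/* hypothetical subset of fields */
	int gro_normal_batch;			/* read on every GRO flush decision */
	int netdev_budget;			/* read at every net_rx_action() round */
	int max_backlog;			/* read on every backlog enqueue */
	int dev_rx_weight;
	int dev_tx_weight;
} __attribute__((aligned(CACHE_LINE)));

static struct hot_knobs hot = {
	.gro_normal_batch = 8,
	.netdev_budget    = 300,
	.max_backlog      = 1000,
	.dev_rx_weight    = 64,
	.dev_tx_weight    = 64,
};

int main(void)
{
	/* All five knobs share a single 64-byte cache line. */
	printf("size=%zu bytes -> %zu cache line(s)\n", sizeof(hot),
	       (sizeof(hot) + CACHE_LINE - 1) / CACHE_LINE);
	return 0;
}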


@ -2,6 +2,7 @@
/* Copyright (C) 2018-2020, Intel Corporation. */
#include "ice.h"
#include <net/rps.h>
/**
* ice_is_arfs_active - helper to check is aRFS is active


@ -42,6 +42,7 @@
#include <net/ip.h>
#include <net/vxlan.h>
#include <net/devlink.h>
#include <net/rps.h>
#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>


@ -34,6 +34,7 @@
#include <linux/mlx5/fs.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/rps.h>
#include "en.h"
#define ARFS_HASH_SHIFT BITS_PER_BYTE


@ -11,6 +11,7 @@
#include "net_driver.h"
#include <linux/module.h>
#include <linux/iommu.h>
#include <net/rps.h>
#include "efx.h"
#include "nic.h"
#include "rx_common.h"


@ -11,6 +11,7 @@
#include "net_driver.h"
#include <linux/module.h>
#include <linux/iommu.h>
#include <net/rps.h>
#include "efx.h"
#include "nic.h"
#include "rx_common.h"


@ -78,6 +78,7 @@
#include <net/ax25.h>
#include <net/rose.h>
#include <net/6lowpan.h>
#include <net/rps.h>
#include <linux/uaccess.h>
#include <linux/proc_fs.h>


@ -225,12 +225,6 @@ struct net_device_core_stats {
#include <linux/cache.h>
#include <linux/skbuff.h>
#ifdef CONFIG_RPS
#include <linux/static_key.h>
extern struct static_key_false rps_needed;
extern struct static_key_false rfs_needed;
#endif
struct neighbour;
struct neigh_parms;
struct sk_buff;
@ -730,86 +724,10 @@ static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node
#endif
}
#ifdef CONFIG_RPS
/*
* This structure holds an RPS map which can be of variable length. The
* map is an array of CPUs.
*/
struct rps_map {
unsigned int len;
struct rcu_head rcu;
u16 cpus[];
};
#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))
/*
* The rps_dev_flow structure contains the mapping of a flow to a CPU, the
* tail pointer for that CPU's input queue at the time of last enqueue, and
* a hardware filter index.
*/
struct rps_dev_flow {
u16 cpu;
u16 filter;
unsigned int last_qtail;
};
#define RPS_NO_FILTER 0xffff
/*
* The rps_dev_flow_table structure contains a table of flow mappings.
*/
struct rps_dev_flow_table {
unsigned int mask;
struct rcu_head rcu;
struct rps_dev_flow flows[];
};
#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
((_num) * sizeof(struct rps_dev_flow)))
/*
* The rps_sock_flow_table contains mappings of flows to the last CPU
* on which they were processed by the application (set in recvmsg).
* Each entry is a 32bit value. Upper part is the high-order bits
* of flow hash, lower part is CPU number.
* rps_cpu_mask is used to partition the space, depending on number of
* possible CPUs : rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1
* For example, if 64 CPUs are possible, rps_cpu_mask = 0x3f,
* meaning we use 32-6=26 bits for the hash.
*/
struct rps_sock_flow_table {
u32 mask;
u32 ents[] ____cacheline_aligned_in_smp;
};
#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))
#define RPS_NO_CPU 0xffff
extern u32 rps_cpu_mask;
extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
u32 hash)
{
if (table && hash) {
unsigned int index = hash & table->mask;
u32 val = hash & ~rps_cpu_mask;
/* We only give a hint, preemption can change CPU under us */
val |= raw_smp_processor_id();
/* The following WRITE_ONCE() is paired with the READ_ONCE()
* here, and another one in get_rps_cpu().
*/
if (READ_ONCE(table->ents[index]) != val)
WRITE_ONCE(table->ents[index], val);
}
}
#ifdef CONFIG_RFS_ACCEL
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
u16 filter_id);
#endif
#endif /* CONFIG_RPS */
/* XPS map type and offset of the xps map within net_device->xps_maps[]. */
enum xps_map_type {
@ -4793,11 +4711,6 @@ void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
const struct pcpu_sw_netstats __percpu *netstats);
void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s);
extern int netdev_max_backlog;
extern int dev_rx_weight;
extern int dev_tx_weight;
extern int gro_normal_batch;
enum {
NESTED_SYNC_IMM_BIT,
NESTED_SYNC_TODO_BIT,
@ -5307,7 +5220,6 @@ static inline const char *netdev_reg_state(const struct net_device *dev)
#define PTYPE_HASH_SIZE (16)
#define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)
extern struct list_head ptype_all __read_mostly;
extern struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
extern struct net_device *blackhole_netdev;


@ -1271,7 +1271,6 @@ static inline void consume_skb(struct sk_buff *skb)
void __consume_stateless_skb(struct sk_buff *skb);
void __kfree_skb(struct sk_buff *skb);
extern struct kmem_cache *skbuff_cache;
void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,


@ -9,6 +9,7 @@
#include <net/ip6_checksum.h>
#include <linux/skbuff.h>
#include <net/udp.h>
#include <net/hotdata.h>
struct napi_gro_cb {
union {
@ -446,7 +447,7 @@ static inline void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb,
{
list_add_tail(&skb->list, &napi->rx_list);
napi->rx_count += segs;
if (napi->rx_count >= READ_ONCE(gro_normal_batch))
if (napi->rx_count >= READ_ONCE(net_hotdata.gro_normal_batch))
gro_normal_list(napi);
}
@ -493,6 +494,4 @@ static inline void inet6_get_iif_sdif(const struct sk_buff *skb, int *iif, int *
#endif
}
extern struct list_head offload_base;
#endif /* _NET_IPV6_GRO_H */

include/net/hotdata.h (new file, 52 lines)

@ -0,0 +1,52 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _NET_HOTDATA_H
#define _NET_HOTDATA_H
#include <linux/types.h>
#include <linux/netdevice.h>
#include <net/protocol.h>
/* Read mostly data used in network fast paths. */
struct net_hotdata {
#if IS_ENABLED(CONFIG_INET)
struct packet_offload ip_packet_offload;
struct net_offload tcpv4_offload;
struct net_protocol tcp_protocol;
struct net_offload udpv4_offload;
struct net_protocol udp_protocol;
struct packet_offload ipv6_packet_offload;
struct net_offload tcpv6_offload;
#if IS_ENABLED(CONFIG_IPV6)
struct inet6_protocol tcpv6_protocol;
struct inet6_protocol udpv6_protocol;
#endif
struct net_offload udpv6_offload;
#endif
struct list_head offload_base;
struct list_head ptype_all;
struct kmem_cache *skbuff_cache;
struct kmem_cache *skbuff_fclone_cache;
struct kmem_cache *skb_small_head_cache;
#ifdef CONFIG_RPS
struct rps_sock_flow_table __rcu *rps_sock_flow_table;
u32 rps_cpu_mask;
#endif
int gro_normal_batch;
int netdev_budget;
int netdev_budget_usecs;
int tstamp_prequeue;
int max_backlog;
int dev_tx_weight;
int dev_rx_weight;
};
#define inet_ehash_secret net_hotdata.tcp_protocol.secret
#define udp_ehash_secret net_hotdata.udp_protocol.secret
#define inet6_ehash_secret net_hotdata.tcpv6_protocol.secret
#define tcp_ipv6_hash_secret net_hotdata.tcpv6_offload.secret
#define udp6_ehash_secret net_hotdata.udpv6_protocol.secret
#define udp_ipv6_hash_secret net_hotdata.udpv6_offload.secret
extern struct net_hotdata net_hotdata;
#endif /* _NET_HOTDATA_H */


@ -46,6 +46,7 @@ struct net_protocol {
* socket lookup?
*/
icmp_strict_tag_validation:1;
u32 secret;
};
#if IS_ENABLED(CONFIG_IPV6)
@ -59,6 +60,7 @@ struct inet6_protocol {
__be32 info);
unsigned int flags; /* INET6_PROTO_xxx */
u32 secret;
};
#define INET6_PROTO_NOPOLICY 0x1
@ -68,6 +70,7 @@ struct inet6_protocol {
struct net_offload {
struct offload_callbacks callbacks;
unsigned int flags; /* Flags used by IPv6 for now */
u32 secret;
};
/* This should be set for any extension header which is compatible with GSO. */
#define INET6_PROTO_GSO_EXTHDR 0x1

include/net/rps.h (new file, 125 lines)

@ -0,0 +1,125 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _NET_RPS_H
#define _NET_RPS_H
#include <linux/types.h>
#include <linux/static_key.h>
#include <net/sock.h>
#include <net/hotdata.h>
#ifdef CONFIG_RPS
extern struct static_key_false rps_needed;
extern struct static_key_false rfs_needed;
/*
* This structure holds an RPS map which can be of variable length. The
* map is an array of CPUs.
*/
struct rps_map {
unsigned int len;
struct rcu_head rcu;
u16 cpus[];
};
#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))
/*
* The rps_dev_flow structure contains the mapping of a flow to a CPU, the
* tail pointer for that CPU's input queue at the time of last enqueue, and
* a hardware filter index.
*/
struct rps_dev_flow {
u16 cpu;
u16 filter;
unsigned int last_qtail;
};
#define RPS_NO_FILTER 0xffff
/*
* The rps_dev_flow_table structure contains a table of flow mappings.
*/
struct rps_dev_flow_table {
unsigned int mask;
struct rcu_head rcu;
struct rps_dev_flow flows[];
};
#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
((_num) * sizeof(struct rps_dev_flow)))
/*
* The rps_sock_flow_table contains mappings of flows to the last CPU
* on which they were processed by the application (set in recvmsg).
* Each entry is a 32bit value. Upper part is the high-order bits
* of flow hash, lower part is CPU number.
* rps_cpu_mask is used to partition the space, depending on number of
* possible CPUs : rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1
* For example, if 64 CPUs are possible, rps_cpu_mask = 0x3f,
* meaning we use 32-6=26 bits for the hash.
*/
struct rps_sock_flow_table {
u32 mask;
u32 ents[] ____cacheline_aligned_in_smp;
};
#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))
#define RPS_NO_CPU 0xffff
static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
u32 hash)
{
unsigned int index = hash & table->mask;
u32 val = hash & ~net_hotdata.rps_cpu_mask;
/* We only give a hint, preemption can change CPU under us */
val |= raw_smp_processor_id();
/* The following WRITE_ONCE() is paired with the READ_ONCE()
* here, and another one in get_rps_cpu().
*/
if (READ_ONCE(table->ents[index]) != val)
WRITE_ONCE(table->ents[index], val);
}
#endif /* CONFIG_RPS */
static inline void sock_rps_record_flow_hash(__u32 hash)
{
#ifdef CONFIG_RPS
struct rps_sock_flow_table *sock_flow_table;
if (!hash)
return;
rcu_read_lock();
sock_flow_table = rcu_dereference(net_hotdata.rps_sock_flow_table);
if (sock_flow_table)
rps_record_sock_flow(sock_flow_table, hash);
rcu_read_unlock();
#endif
}
static inline void sock_rps_record_flow(const struct sock *sk)
{
#ifdef CONFIG_RPS
if (static_branch_unlikely(&rfs_needed)) {
/* Reading sk->sk_rxhash might incur an expensive cache line
* miss.
*
* TCP_ESTABLISHED does cover almost all states where RFS
* might be useful, and is cheaper [1] than testing :
* IPv4: inet_sk(sk)->inet_daddr
* IPv6: ipv6_addr_any(&sk->sk_v6_daddr)
* OR an additional socket flag
* [1] : sk_state and sk_prot are in the same cache line.
*/
if (sk->sk_state == TCP_ESTABLISHED) {
/* This READ_ONCE() is paired with the WRITE_ONCE()
* from sock_rps_save_rxhash() and sock_rps_reset_rxhash().
*/
sock_rps_record_flow_hash(READ_ONCE(sk->sk_rxhash));
}
}
#endif
}
#endif /* _NET_RPS_H */
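
As a worked example of the entry encoding described in the rps_sock_flow_table comment above (a standalone userspace sketch, not part of the patch): with 64 possible CPUs, rps_cpu_mask = roundup_pow_of_two(64) - 1 = 0x3f, so each 32-bit entry keeps the upper 26 bits of the flow hash and the CPU number in the low 6 bits.

/* Userspace illustration of the rps_sock_flow_table entry layout. */
#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	uint32_t rps_cpu_mask = 0x3f;	/* roundup_pow_of_two(64 CPUs) - 1 */
	uint32_t hash = 0xdeadbeef;	/* example flow hash */
	uint32_t cpu = 5;		/* CPU that last ran recvmsg() */

	/* rps_record_sock_flow(): keep high-order hash bits, store the CPU in the low bits */
	uint32_t ent = (hash & ~rps_cpu_mask) | cpu;

	/* get_rps_cpu(): the entry is valid for this flow if the high bits match */
	int match = !((ent ^ hash) & ~rps_cpu_mask);

	printf("ent=0x%08" PRIx32 " match=%d next_cpu=%" PRIu32 "\n",
	       ent, match, ent & rps_cpu_mask);
	return 0;
}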


@ -1117,41 +1117,6 @@ static inline void sk_incoming_cpu_update(struct sock *sk)
WRITE_ONCE(sk->sk_incoming_cpu, cpu);
}
static inline void sock_rps_record_flow_hash(__u32 hash)
{
#ifdef CONFIG_RPS
struct rps_sock_flow_table *sock_flow_table;
rcu_read_lock();
sock_flow_table = rcu_dereference(rps_sock_flow_table);
rps_record_sock_flow(sock_flow_table, hash);
rcu_read_unlock();
#endif
}
static inline void sock_rps_record_flow(const struct sock *sk)
{
#ifdef CONFIG_RPS
if (static_branch_unlikely(&rfs_needed)) {
/* Reading sk->sk_rxhash might incur an expensive cache line
* miss.
*
* TCP_ESTABLISHED does cover almost all states where RFS
* might be useful, and is cheaper [1] than testing :
* IPv4: inet_sk(sk)->inet_daddr
* IPv6: ipv6_addr_any(&sk->sk_v6_daddr)
* OR an additional socket flag
* [1] : sk_state and sk_prot are in the same cache line.
*/
if (sk->sk_state == TCP_ESTABLISHED) {
/* This READ_ONCE() is paired with the WRITE_ONCE()
* from sock_rps_save_rxhash() and sock_rps_reset_rxhash().
*/
sock_rps_record_flow_hash(READ_ONCE(sk->sk_rxhash));
}
}
#endif
}
static inline void sock_rps_save_rxhash(struct sock *sk,
const struct sk_buff *skb)


@ -24,6 +24,7 @@
#include <linux/filter.h>
#include <linux/ptr_ring.h>
#include <net/xdp.h>
#include <net/hotdata.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
@ -326,7 +327,8 @@ static int cpu_map_kthread_run(void *data)
/* Support running another XDP prog on this CPU */
nframes = cpu_map_bpf_prog_run(rcpu, frames, xdp_n, &stats, &list);
if (nframes) {
m = kmem_cache_alloc_bulk(skbuff_cache, gfp, nframes, skbs);
m = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache,
gfp, nframes, skbs);
if (unlikely(m == 0)) {
for (i = 0; i < nframes; i++)
skbs[i] = NULL; /* effect: xdp_return_frame */


@ -12,6 +12,7 @@
#include <linux/rcupdate_trace.h>
#include <linux/sched/signal.h>
#include <net/bpf_sk_storage.h>
#include <net/hotdata.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <net/net_namespace.h>
@ -254,7 +255,8 @@ static int xdp_recv_frames(struct xdp_frame **frames, int nframes,
int i, n;
LIST_HEAD(list);
n = kmem_cache_alloc_bulk(skbuff_cache, gfp, nframes, (void **)skbs);
n = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache, gfp, nframes,
(void **)skbs);
if (unlikely(n == 0)) {
for (i = 0; i < nframes; i++)
xdp_return_frame(frames[i]);


@ -18,6 +18,7 @@ obj-y += dev.o dev_addr_lists.o dst.o netevent.o \
obj-$(CONFIG_NETDEV_ADDR_LIST_TEST) += dev_addr_lists_test.o
obj-y += net-sysfs.o
obj-y += hotdata.o
obj-$(CONFIG_PAGE_POOL) += page_pool.o page_pool_user.o
obj-$(CONFIG_PROC_FS) += net-procfs.o
obj-$(CONFIG_NET_PKTGEN) += pktgen.o


@ -155,13 +155,13 @@
#include <net/netdev_rx_queue.h>
#include <net/page_pool/types.h>
#include <net/page_pool/helpers.h>
#include <net/rps.h>
#include "dev.h"
#include "net-sysfs.h"
static DEFINE_SPINLOCK(ptype_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly; /* Taps */
static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_extack(unsigned long val,
@ -540,7 +540,7 @@ static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
static inline struct list_head *ptype_head(const struct packet_type *pt)
{
if (pt->type == htons(ETH_P_ALL))
return pt->dev ? &pt->dev->ptype_all : &ptype_all;
return pt->dev ? &pt->dev->ptype_all : &net_hotdata.ptype_all;
else
return pt->dev ? &pt->dev->ptype_specific :
&ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
@ -2226,7 +2226,8 @@ static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
*/
bool dev_nit_active(struct net_device *dev)
{
return !list_empty(&ptype_all) || !list_empty(&dev->ptype_all);
return !list_empty(&net_hotdata.ptype_all) ||
!list_empty(&dev->ptype_all);
}
EXPORT_SYMBOL_GPL(dev_nit_active);
@ -2237,10 +2238,9 @@ EXPORT_SYMBOL_GPL(dev_nit_active);
void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
struct packet_type *ptype;
struct list_head *ptype_list = &net_hotdata.ptype_all;
struct packet_type *ptype, *pt_prev = NULL;
struct sk_buff *skb2 = NULL;
struct packet_type *pt_prev = NULL;
struct list_head *ptype_list = &ptype_all;
rcu_read_lock();
again:
@ -2286,7 +2286,7 @@ again:
pt_prev = ptype;
}
if (ptype_list == &ptype_all) {
if (ptype_list == &net_hotdata.ptype_all) {
ptype_list = &dev->ptype_all;
goto again;
}
@ -4405,19 +4405,10 @@ EXPORT_SYMBOL(__dev_direct_xmit);
* Receiver routines
*************************************************************************/
int netdev_max_backlog __read_mostly = 1000;
EXPORT_SYMBOL(netdev_max_backlog);
int netdev_tstamp_prequeue __read_mostly = 1;
unsigned int sysctl_skb_defer_max __read_mostly = 64;
int netdev_budget __read_mostly = 300;
/* Must be at least 2 jiffes to guarantee 1 jiffy timeout */
unsigned int __read_mostly netdev_budget_usecs = 2 * USEC_PER_SEC / HZ;
int weight_p __read_mostly = 64; /* old backlog weight */
int dev_weight_rx_bias __read_mostly = 1; /* bias for backlog weight */
int dev_weight_tx_bias __read_mostly = 1; /* bias for output_queue quota */
int dev_rx_weight __read_mostly = 64;
int dev_tx_weight __read_mostly = 64;
/* Called with irq disabled */
static inline void ____napi_schedule(struct softnet_data *sd,
@ -4459,12 +4450,6 @@ static inline void ____napi_schedule(struct softnet_data *sd,
#ifdef CONFIG_RPS
/* One global table that all flow-based protocols share. */
struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
EXPORT_SYMBOL(rps_sock_flow_table);
u32 rps_cpu_mask __read_mostly;
EXPORT_SYMBOL(rps_cpu_mask);
struct static_key_false rps_needed __read_mostly;
EXPORT_SYMBOL(rps_needed);
struct static_key_false rfs_needed __read_mostly;
@ -4556,7 +4541,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
if (!hash)
goto done;
sock_flow_table = rcu_dereference(rps_sock_flow_table);
sock_flow_table = rcu_dereference(net_hotdata.rps_sock_flow_table);
if (flow_table && sock_flow_table) {
struct rps_dev_flow *rflow;
u32 next_cpu;
@ -4566,10 +4551,10 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
* This READ_ONCE() pairs with WRITE_ONCE() from rps_record_sock_flow().
*/
ident = READ_ONCE(sock_flow_table->ents[hash & sock_flow_table->mask]);
if ((ident ^ hash) & ~rps_cpu_mask)
if ((ident ^ hash) & ~net_hotdata.rps_cpu_mask)
goto try_rps;
next_cpu = ident & rps_cpu_mask;
next_cpu = ident & net_hotdata.rps_cpu_mask;
/* OK, now we know there is a match,
* we can look at the local (per receive queue) flow table
@ -4718,7 +4703,7 @@ static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
struct softnet_data *sd;
unsigned int old_flow, new_flow;
if (qlen < (READ_ONCE(netdev_max_backlog) >> 1))
if (qlen < (READ_ONCE(net_hotdata.max_backlog) >> 1))
return false;
sd = this_cpu_ptr(&softnet_data);
@ -4766,7 +4751,8 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
if (!netif_running(skb->dev))
goto drop;
qlen = skb_queue_len(&sd->input_pkt_queue);
if (qlen <= READ_ONCE(netdev_max_backlog) && !skb_flow_limit(skb, qlen)) {
if (qlen <= READ_ONCE(net_hotdata.max_backlog) &&
!skb_flow_limit(skb, qlen)) {
if (qlen) {
enqueue:
__skb_queue_tail(&sd->input_pkt_queue, skb);
@ -5055,7 +5041,7 @@ static int netif_rx_internal(struct sk_buff *skb)
{
int ret;
net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
net_timestamp_check(READ_ONCE(net_hotdata.tstamp_prequeue), skb);
trace_netif_rx(skb);
@ -5347,7 +5333,7 @@ static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc,
int ret = NET_RX_DROP;
__be16 type;
net_timestamp_check(!READ_ONCE(netdev_tstamp_prequeue), skb);
net_timestamp_check(!READ_ONCE(net_hotdata.tstamp_prequeue), skb);
trace_netif_receive_skb(skb);
@ -5391,7 +5377,7 @@ another_round:
if (pfmemalloc)
goto skip_taps;
list_for_each_entry_rcu(ptype, &ptype_all, list) {
list_for_each_entry_rcu(ptype, &net_hotdata.ptype_all, list) {
if (pt_prev)
ret = deliver_skb(skb, pt_prev, orig_dev);
pt_prev = ptype;
@ -5731,7 +5717,7 @@ static int netif_receive_skb_internal(struct sk_buff *skb)
{
int ret;
net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
net_timestamp_check(READ_ONCE(net_hotdata.tstamp_prequeue), skb);
if (skb_defer_rx_timestamp(skb))
return NET_RX_SUCCESS;
@ -5761,7 +5747,8 @@ void netif_receive_skb_list_internal(struct list_head *head)
INIT_LIST_HEAD(&sublist);
list_for_each_entry_safe(skb, next, head, list) {
net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
net_timestamp_check(READ_ONCE(net_hotdata.tstamp_prequeue),
skb);
skb_list_del_init(skb);
if (!skb_defer_rx_timestamp(skb))
list_add_tail(&skb->list, &sublist);
@ -5985,7 +5972,7 @@ static int process_backlog(struct napi_struct *napi, int quota)
net_rps_action_and_irq_enable(sd);
}
napi->weight = READ_ONCE(dev_rx_weight);
napi->weight = READ_ONCE(net_hotdata.dev_rx_weight);
while (again) {
struct sk_buff *skb;
@ -6790,8 +6777,8 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
{
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
unsigned long time_limit = jiffies +
usecs_to_jiffies(READ_ONCE(netdev_budget_usecs));
int budget = READ_ONCE(netdev_budget);
usecs_to_jiffies(READ_ONCE(net_hotdata.netdev_budget_usecs));
int budget = READ_ONCE(net_hotdata.netdev_budget);
LIST_HEAD(list);
LIST_HEAD(repoll);
@ -11752,7 +11739,6 @@ static int __init net_dev_init(void)
if (netdev_kobject_init())
goto out;
INIT_LIST_HEAD(&ptype_all);
for (i = 0; i < PTYPE_HASH_SIZE; i++)
INIT_LIST_HEAD(&ptype_base[i]);


@ -38,10 +38,7 @@ int dev_addr_init(struct net_device *dev);
void dev_addr_check(struct net_device *dev);
/* sysctls not referred to from outside net/core/ */
extern int netdev_budget;
extern unsigned int netdev_budget_usecs;
extern unsigned int sysctl_skb_defer_max;
extern int netdev_tstamp_prequeue;
extern int netdev_unregister_timeout_secs;
extern int weight_p;
extern int dev_weight_rx_bias;


@ -10,9 +10,6 @@
#define GRO_MAX_HEAD (MAX_HEADER + 128)
static DEFINE_SPINLOCK(offload_lock);
struct list_head offload_base __read_mostly = LIST_HEAD_INIT(offload_base);
/* Maximum number of GRO_NORMAL skbs to batch up for list-RX */
int gro_normal_batch __read_mostly = 8;
/**
* dev_add_offload - register offload handlers
@ -31,7 +28,7 @@ void dev_add_offload(struct packet_offload *po)
struct packet_offload *elem;
spin_lock(&offload_lock);
list_for_each_entry(elem, &offload_base, list) {
list_for_each_entry(elem, &net_hotdata.offload_base, list) {
if (po->priority < elem->priority)
break;
}
@ -55,7 +52,7 @@ EXPORT_SYMBOL(dev_add_offload);
*/
static void __dev_remove_offload(struct packet_offload *po)
{
struct list_head *head = &offload_base;
struct list_head *head = &net_hotdata.offload_base;
struct packet_offload *po1;
spin_lock(&offload_lock);
@ -235,9 +232,9 @@ done:
static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
{
struct list_head *head = &net_hotdata.offload_base;
struct packet_offload *ptype;
__be16 type = skb->protocol;
struct list_head *head = &offload_base;
int err = -ENOENT;
BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
@ -444,7 +441,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
{
u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
struct gro_list *gro_list = &napi->gro_hash[bucket];
struct list_head *head = &offload_base;
struct list_head *head = &net_hotdata.offload_base;
struct packet_offload *ptype;
__be16 type = skb->protocol;
struct sk_buff *pp = NULL;
@ -550,7 +547,7 @@ normal:
struct packet_offload *gro_find_receive_by_type(__be16 type)
{
struct list_head *offload_head = &offload_base;
struct list_head *offload_head = &net_hotdata.offload_base;
struct packet_offload *ptype;
list_for_each_entry_rcu(ptype, offload_head, list) {
@ -564,7 +561,7 @@ EXPORT_SYMBOL(gro_find_receive_by_type);
struct packet_offload *gro_find_complete_by_type(__be16 type)
{
struct list_head *offload_head = &offload_base;
struct list_head *offload_head = &net_hotdata.offload_base;
struct packet_offload *ptype;
list_for_each_entry_rcu(ptype, offload_head, list) {


@ -3,6 +3,7 @@
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <net/gro_cells.h>
#include <net/hotdata.h>
struct gro_cell {
struct sk_buff_head napi_skbs;
@ -26,7 +27,7 @@ int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
cell = this_cpu_ptr(gcells->cells);
if (skb_queue_len(&cell->napi_skbs) > READ_ONCE(netdev_max_backlog)) {
if (skb_queue_len(&cell->napi_skbs) > READ_ONCE(net_hotdata.max_backlog)) {
drop:
dev_core_stats_rx_dropped_inc(dev);
kfree_skb(skb);


@ -17,7 +17,7 @@ struct sk_buff *skb_eth_gso_segment(struct sk_buff *skb,
struct packet_offload *ptype;
rcu_read_lock();
list_for_each_entry_rcu(ptype, &offload_base, list) {
list_for_each_entry_rcu(ptype, &net_hotdata.offload_base, list) {
if (ptype->type == type && ptype->callbacks.gso_segment) {
segs = ptype->callbacks.gso_segment(skb, features);
break;
@ -48,7 +48,7 @@ struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
__skb_pull(skb, vlan_depth);
rcu_read_lock();
list_for_each_entry_rcu(ptype, &offload_base, list) {
list_for_each_entry_rcu(ptype, &net_hotdata.offload_base, list) {
if (ptype->type == type && ptype->callbacks.gso_segment) {
segs = ptype->callbacks.gso_segment(skb, features);
break;

net/core/hotdata.c (new file, 22 lines)

@ -0,0 +1,22 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#include <net/hotdata.h>
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/list.h>
struct net_hotdata net_hotdata __cacheline_aligned = {
.offload_base = LIST_HEAD_INIT(net_hotdata.offload_base),
.ptype_all = LIST_HEAD_INIT(net_hotdata.ptype_all),
.gro_normal_batch = 8,
.netdev_budget = 300,
/* Must be at least 2 jiffes to guarantee 1 jiffy timeout */
.netdev_budget_usecs = 2 * USEC_PER_SEC / HZ,
.tstamp_prequeue = 1,
.max_backlog = 1000,
.dev_tx_weight = 64,
.dev_rx_weight = 64,
};
EXPORT_SYMBOL(net_hotdata);


@ -3,6 +3,7 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/wext.h>
#include <net/hotdata.h>
#include "dev.h"
@ -183,7 +184,7 @@ static void *ptype_get_idx(struct seq_file *seq, loff_t pos)
}
}
list_for_each_entry_rcu(pt, &ptype_all, list) {
list_for_each_entry_rcu(pt, &net_hotdata.ptype_all, list) {
if (i == pos)
return pt;
++i;
@ -231,13 +232,13 @@ static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
}
}
nxt = ptype_all.next;
nxt = net_hotdata.ptype_all.next;
goto ptype_all;
}
if (pt->type == htons(ETH_P_ALL)) {
ptype_all:
if (nxt != &ptype_all)
if (nxt != &net_hotdata.ptype_all)
goto found;
hash = 0;
nxt = ptype_base[0].next;


@ -24,6 +24,7 @@
#include <linux/of_net.h>
#include <linux/cpu.h>
#include <net/netdev_rx_queue.h>
#include <net/rps.h>
#include "dev.h"
#include "net-sysfs.h"


@ -69,6 +69,7 @@
#include <net/sock.h>
#include <net/checksum.h>
#include <net/gso.h>
#include <net/hotdata.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include <net/mpls.h>
@ -88,15 +89,10 @@
#include "dev.h"
#include "sock_destructor.h"
struct kmem_cache *skbuff_cache __ro_after_init;
static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
#ifdef CONFIG_SKB_EXTENSIONS
static struct kmem_cache *skbuff_ext_cache __ro_after_init;
#endif
static struct kmem_cache *skb_small_head_cache __ro_after_init;
#define SKB_SMALL_HEAD_SIZE SKB_HEAD_ALIGN(MAX_TCP_HEADER)
/* We want SKB_SMALL_HEAD_CACHE_SIZE to not be a power of two.
@ -349,7 +345,7 @@ static struct sk_buff *napi_skb_cache_get(void)
struct sk_buff *skb;
if (unlikely(!nc->skb_count)) {
nc->skb_count = kmem_cache_alloc_bulk(skbuff_cache,
nc->skb_count = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache,
GFP_ATOMIC,
NAPI_SKB_CACHE_BULK,
nc->skb_cache);
@ -358,7 +354,7 @@ static struct sk_buff *napi_skb_cache_get(void)
}
skb = nc->skb_cache[--nc->skb_count];
kasan_mempool_unpoison_object(skb, kmem_cache_size(skbuff_cache));
kasan_mempool_unpoison_object(skb, kmem_cache_size(net_hotdata.skbuff_cache));
return skb;
}
@ -416,7 +412,7 @@ struct sk_buff *slab_build_skb(void *data)
struct sk_buff *skb;
unsigned int size;
skb = kmem_cache_alloc(skbuff_cache, GFP_ATOMIC);
skb = kmem_cache_alloc(net_hotdata.skbuff_cache, GFP_ATOMIC);
if (unlikely(!skb))
return NULL;
@ -467,7 +463,7 @@ struct sk_buff *__build_skb(void *data, unsigned int frag_size)
{
struct sk_buff *skb;
skb = kmem_cache_alloc(skbuff_cache, GFP_ATOMIC);
skb = kmem_cache_alloc(net_hotdata.skbuff_cache, GFP_ATOMIC);
if (unlikely(!skb))
return NULL;
@ -578,7 +574,7 @@ static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node,
obj_size = SKB_HEAD_ALIGN(*size);
if (obj_size <= SKB_SMALL_HEAD_CACHE_SIZE &&
!(flags & KMALLOC_NOT_NORMAL_BITS)) {
obj = kmem_cache_alloc_node(skb_small_head_cache,
obj = kmem_cache_alloc_node(net_hotdata.skb_small_head_cache,
flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
node);
*size = SKB_SMALL_HEAD_CACHE_SIZE;
@ -586,7 +582,7 @@ static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node,
goto out;
/* Try again but now we are using pfmemalloc reserves */
ret_pfmemalloc = true;
obj = kmem_cache_alloc_node(skb_small_head_cache, flags, node);
obj = kmem_cache_alloc_node(net_hotdata.skb_small_head_cache, flags, node);
goto out;
}
@ -649,7 +645,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
u8 *data;
cache = (flags & SKB_ALLOC_FCLONE)
? skbuff_fclone_cache : skbuff_cache;
? net_hotdata.skbuff_fclone_cache : net_hotdata.skbuff_cache;
if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
gfp_mask |= __GFP_MEMALLOC;
@ -1095,7 +1091,7 @@ static int skb_pp_frag_ref(struct sk_buff *skb)
static void skb_kfree_head(void *head, unsigned int end_offset)
{
if (end_offset == SKB_SMALL_HEAD_HEADROOM)
kmem_cache_free(skb_small_head_cache, head);
kmem_cache_free(net_hotdata.skb_small_head_cache, head);
else
kfree(head);
}
@ -1162,7 +1158,7 @@ static void kfree_skbmem(struct sk_buff *skb)
switch (skb->fclone) {
case SKB_FCLONE_UNAVAILABLE:
kmem_cache_free(skbuff_cache, skb);
kmem_cache_free(net_hotdata.skbuff_cache, skb);
return;
case SKB_FCLONE_ORIG:
@ -1183,7 +1179,7 @@ static void kfree_skbmem(struct sk_buff *skb)
if (!refcount_dec_and_test(&fclones->fclone_ref))
return;
fastpath:
kmem_cache_free(skbuff_fclone_cache, fclones);
kmem_cache_free(net_hotdata.skbuff_fclone_cache, fclones);
}
void skb_release_head_state(struct sk_buff *skb)
@ -1280,7 +1276,7 @@ static void kfree_skb_add_bulk(struct sk_buff *skb,
sa->skb_array[sa->skb_count++] = skb;
if (unlikely(sa->skb_count == KFREE_SKB_BULK_SIZE)) {
kmem_cache_free_bulk(skbuff_cache, KFREE_SKB_BULK_SIZE,
kmem_cache_free_bulk(net_hotdata.skbuff_cache, KFREE_SKB_BULK_SIZE,
sa->skb_array);
sa->skb_count = 0;
}
@ -1305,7 +1301,7 @@ kfree_skb_list_reason(struct sk_buff *segs, enum skb_drop_reason reason)
}
if (sa.skb_count)
kmem_cache_free_bulk(skbuff_cache, sa.skb_count, sa.skb_array);
kmem_cache_free_bulk(net_hotdata.skbuff_cache, sa.skb_count, sa.skb_array);
}
EXPORT_SYMBOL(kfree_skb_list_reason);
@ -1467,9 +1463,9 @@ static void napi_skb_cache_put(struct sk_buff *skb)
if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
for (i = NAPI_SKB_CACHE_HALF; i < NAPI_SKB_CACHE_SIZE; i++)
kasan_mempool_unpoison_object(nc->skb_cache[i],
kmem_cache_size(skbuff_cache));
kmem_cache_size(net_hotdata.skbuff_cache));
kmem_cache_free_bulk(skbuff_cache, NAPI_SKB_CACHE_HALF,
kmem_cache_free_bulk(net_hotdata.skbuff_cache, NAPI_SKB_CACHE_HALF,
nc->skb_cache + NAPI_SKB_CACHE_HALF);
nc->skb_count = NAPI_SKB_CACHE_HALF;
}
@ -2066,7 +2062,7 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
if (skb_pfmemalloc(skb))
gfp_mask |= __GFP_MEMALLOC;
n = kmem_cache_alloc(skbuff_cache, gfp_mask);
n = kmem_cache_alloc(net_hotdata.skbuff_cache, gfp_mask);
if (!n)
return NULL;
@ -5005,7 +5001,7 @@ static void skb_extensions_init(void) {}
void __init skb_init(void)
{
skbuff_cache = kmem_cache_create_usercopy("skbuff_head_cache",
net_hotdata.skbuff_cache = kmem_cache_create_usercopy("skbuff_head_cache",
sizeof(struct sk_buff),
0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC|
@ -5013,7 +5009,7 @@ void __init skb_init(void)
offsetof(struct sk_buff, cb),
sizeof_field(struct sk_buff, cb),
NULL);
skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
net_hotdata.skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
sizeof(struct sk_buff_fclones),
0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC,
@ -5022,7 +5018,7 @@ void __init skb_init(void)
* struct skb_shared_info is located at the end of skb->head,
* and should not be copied to/from user.
*/
skb_small_head_cache = kmem_cache_create_usercopy("skbuff_small_head",
net_hotdata.skb_small_head_cache = kmem_cache_create_usercopy("skbuff_small_head",
SKB_SMALL_HEAD_CACHE_SIZE,
0,
SLAB_HWCACHE_ALIGN | SLAB_PANIC,
@ -5895,7 +5891,7 @@ void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
{
if (head_stolen) {
skb_release_head_state(skb);
kmem_cache_free(skbuff_cache, skb);
kmem_cache_free(net_hotdata.skbuff_cache, skb);
} else {
__kfree_skb(skb);
}


@ -23,6 +23,8 @@
#include <net/net_ratelimit.h>
#include <net/busy_poll.h>
#include <net/pkt_sched.h>
#include <net/hotdata.h>
#include <net/rps.h>
#include "dev.h"
@ -138,7 +140,8 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
mutex_lock(&sock_flow_mutex);
orig_sock_table = rcu_dereference_protected(rps_sock_flow_table,
orig_sock_table = rcu_dereference_protected(
net_hotdata.rps_sock_flow_table,
lockdep_is_held(&sock_flow_mutex));
size = orig_size = orig_sock_table ? orig_sock_table->mask + 1 : 0;
@ -159,7 +162,8 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
mutex_unlock(&sock_flow_mutex);
return -ENOMEM;
}
rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1;
net_hotdata.rps_cpu_mask =
roundup_pow_of_two(nr_cpu_ids) - 1;
sock_table->mask = size - 1;
} else
sock_table = orig_sock_table;
@ -170,7 +174,8 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
sock_table = NULL;
if (sock_table != orig_sock_table) {
rcu_assign_pointer(rps_sock_flow_table, sock_table);
rcu_assign_pointer(net_hotdata.rps_sock_flow_table,
sock_table);
if (sock_table) {
static_branch_inc(&rps_needed);
static_branch_inc(&rfs_needed);
@ -300,8 +305,8 @@ static int proc_do_dev_weight(struct ctl_table *table, int write,
ret = proc_dointvec(table, write, buffer, lenp, ppos);
if (!ret && write) {
weight = READ_ONCE(weight_p);
WRITE_ONCE(dev_rx_weight, weight * dev_weight_rx_bias);
WRITE_ONCE(dev_tx_weight, weight * dev_weight_tx_bias);
WRITE_ONCE(net_hotdata.dev_rx_weight, weight * dev_weight_rx_bias);
WRITE_ONCE(net_hotdata.dev_tx_weight, weight * dev_weight_tx_bias);
}
mutex_unlock(&dev_weight_mutex);
@ -439,7 +444,7 @@ static struct ctl_table net_core_table[] = {
},
{
.procname = "netdev_max_backlog",
.data = &netdev_max_backlog,
.data = &net_hotdata.max_backlog,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec
@ -498,7 +503,7 @@ static struct ctl_table net_core_table[] = {
#endif
{
.procname = "netdev_tstamp_prequeue",
.data = &netdev_tstamp_prequeue,
.data = &net_hotdata.tstamp_prequeue,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec
@ -576,7 +581,7 @@ static struct ctl_table net_core_table[] = {
#endif
{
.procname = "netdev_budget",
.data = &netdev_budget,
.data = &net_hotdata.netdev_budget,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec
@ -599,7 +604,7 @@ static struct ctl_table net_core_table[] = {
},
{
.procname = "netdev_budget_usecs",
.data = &netdev_budget_usecs,
.data = &net_hotdata.netdev_budget_usecs,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
@ -632,7 +637,7 @@ static struct ctl_table net_core_table[] = {
},
{
.procname = "gro_normal_batch",
.data = &gro_normal_batch,
.data = &net_hotdata.gro_normal_batch,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,


@ -16,6 +16,7 @@
#include <linux/bug.h>
#include <net/page_pool/helpers.h>
#include <net/hotdata.h>
#include <net/xdp.h>
#include <net/xdp_priv.h> /* struct xdp_mem_allocator */
#include <trace/events/xdp.h>
@ -589,7 +590,7 @@ EXPORT_SYMBOL_GPL(xdp_warn);
int xdp_alloc_skb_bulk(void **skbs, int n_skb, gfp_t gfp)
{
n_skb = kmem_cache_alloc_bulk(skbuff_cache, gfp, n_skb, skbs);
n_skb = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache, gfp, n_skb, skbs);
if (unlikely(!n_skb))
return -ENOMEM;
@ -658,7 +659,7 @@ struct sk_buff *xdp_build_skb_from_frame(struct xdp_frame *xdpf,
{
struct sk_buff *skb;
skb = kmem_cache_alloc(skbuff_cache, GFP_ATOMIC);
skb = kmem_cache_alloc(net_hotdata.skbuff_cache, GFP_ATOMIC);
if (unlikely(!skb))
return NULL;


@ -119,6 +119,7 @@
#endif
#include <net/l3mdev.h>
#include <net/compat.h>
#include <net/rps.h>
#include <trace/events/sock.h>
@ -1751,19 +1752,6 @@ static const struct net_protocol igmp_protocol = {
};
#endif
static const struct net_protocol tcp_protocol = {
.handler = tcp_v4_rcv,
.err_handler = tcp_v4_err,
.no_policy = 1,
.icmp_strict_tag_validation = 1,
};
static const struct net_protocol udp_protocol = {
.handler = udp_rcv,
.err_handler = udp_err,
.no_policy = 1,
};
static const struct net_protocol icmp_protocol = {
.handler = icmp_rcv,
.err_handler = icmp_err,
@ -1904,14 +1892,6 @@ static int ipv4_proc_init(void);
* IP protocol layer initialiser
*/
static struct packet_offload ip_packet_offload __read_mostly = {
.type = cpu_to_be16(ETH_P_IP),
.callbacks = {
.gso_segment = inet_gso_segment,
.gro_receive = inet_gro_receive,
.gro_complete = inet_gro_complete,
},
};
static const struct net_offload ipip_offload = {
.callbacks = {
@ -1938,7 +1918,15 @@ static int __init ipv4_offload_init(void)
if (ipip_offload_init() < 0)
pr_crit("%s: Cannot add IPIP protocol offload\n", __func__);
dev_add_offload(&ip_packet_offload);
net_hotdata.ip_packet_offload = (struct packet_offload) {
.type = cpu_to_be16(ETH_P_IP),
.callbacks = {
.gso_segment = inet_gso_segment,
.gro_receive = inet_gro_receive,
.gro_complete = inet_gro_complete,
},
};
dev_add_offload(&net_hotdata.ip_packet_offload);
return 0;
}
@ -1992,9 +1980,22 @@ static int __init inet_init(void)
if (inet_add_protocol(&icmp_protocol, IPPROTO_ICMP) < 0)
pr_crit("%s: Cannot add ICMP protocol\n", __func__);
if (inet_add_protocol(&udp_protocol, IPPROTO_UDP) < 0)
net_hotdata.udp_protocol = (struct net_protocol) {
.handler = udp_rcv,
.err_handler = udp_err,
.no_policy = 1,
};
if (inet_add_protocol(&net_hotdata.udp_protocol, IPPROTO_UDP) < 0)
pr_crit("%s: Cannot add UDP protocol\n", __func__);
if (inet_add_protocol(&tcp_protocol, IPPROTO_TCP) < 0)
net_hotdata.tcp_protocol = (struct net_protocol) {
.handler = tcp_v4_rcv,
.err_handler = tcp_v4_err,
.no_policy = 1,
.icmp_strict_tag_validation = 1,
};
if (inet_add_protocol(&net_hotdata.tcp_protocol, IPPROTO_TCP) < 0)
pr_crit("%s: Cannot add TCP protocol\n", __func__);
#ifdef CONFIG_IP_MULTICAST
if (inet_add_protocol(&igmp_protocol, IPPROTO_IGMP) < 0)


@ -24,6 +24,7 @@
#include <net/inet6_hashtables.h>
#endif
#include <net/secure_seq.h>
#include <net/hotdata.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/sock_reuseport.h>
@ -32,8 +33,6 @@ u32 inet_ehashfn(const struct net *net, const __be32 laddr,
const __u16 lport, const __be32 faddr,
const __be16 fport)
{
static u32 inet_ehash_secret __read_mostly;
net_get_random_once(&inet_ehash_secret, sizeof(inet_ehash_secret));
return __inet_ehashfn(laddr, lport, faddr, fport,


@ -279,6 +279,7 @@
#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <net/busy_poll.h>
#include <net/rps.h>
/* Track pending CMSGs. */
enum {


@ -345,15 +345,14 @@ INDIRECT_CALLABLE_SCOPE int tcp4_gro_complete(struct sk_buff *skb, int thoff)
return 0;
}
static const struct net_offload tcpv4_offload = {
.callbacks = {
.gso_segment = tcp4_gso_segment,
.gro_receive = tcp4_gro_receive,
.gro_complete = tcp4_gro_complete,
},
};
int __init tcpv4_offload_init(void)
{
return inet_add_offload(&tcpv4_offload, IPPROTO_TCP);
net_hotdata.tcpv4_offload = (struct net_offload) {
.callbacks = {
.gso_segment = tcp4_gso_segment,
.gro_receive = tcp4_gro_receive,
.gro_complete = tcp4_gro_complete,
},
};
return inet_add_offload(&net_hotdata.tcpv4_offload, IPPROTO_TCP);
}


@ -411,8 +411,6 @@ INDIRECT_CALLABLE_SCOPE
u32 udp_ehashfn(const struct net *net, const __be32 laddr, const __u16 lport,
const __be32 faddr, const __be16 fport)
{
static u32 udp_ehash_secret __read_mostly;
net_get_random_once(&udp_ehash_secret, sizeof(udp_ehash_secret));
return __inet_ehashfn(laddr, lport, faddr, fport,


@ -737,15 +737,14 @@ INDIRECT_CALLABLE_SCOPE int udp4_gro_complete(struct sk_buff *skb, int nhoff)
return udp_gro_complete(skb, nhoff, udp4_lib_lookup_skb);
}
static const struct net_offload udpv4_offload = {
.callbacks = {
.gso_segment = udp4_ufo_fragment,
.gro_receive = udp4_gro_receive,
.gro_complete = udp4_gro_complete,
},
};
int __init udpv4_offload_init(void)
{
return inet_add_offload(&udpv4_offload, IPPROTO_UDP);
net_hotdata.udpv4_offload = (struct net_offload) {
.callbacks = {
.gso_segment = udp4_ufo_fragment,
.gro_receive = udp4_gro_receive,
.gro_complete = udp4_gro_complete,
},
};
return inet_add_offload(&net_hotdata.udpv4_offload, IPPROTO_UDP);
}


@ -64,6 +64,7 @@
#include <net/xfrm.h>
#include <net/ioam6.h>
#include <net/rawv6.h>
#include <net/rps.h>
#include <linux/uaccess.h>
#include <linux/mroute6.h>


@ -14,6 +14,7 @@
#include <linux/random.h>
#include <net/addrconf.h>
#include <net/hotdata.h>
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
@ -25,16 +26,13 @@ u32 inet6_ehashfn(const struct net *net,
const struct in6_addr *laddr, const u16 lport,
const struct in6_addr *faddr, const __be16 fport)
{
static u32 inet6_ehash_secret __read_mostly;
static u32 ipv6_hash_secret __read_mostly;
u32 lhash, fhash;
net_get_random_once(&inet6_ehash_secret, sizeof(inet6_ehash_secret));
net_get_random_once(&ipv6_hash_secret, sizeof(ipv6_hash_secret));
net_get_random_once(&tcp_ipv6_hash_secret, sizeof(tcp_ipv6_hash_secret));
lhash = (__force u32)laddr->s6_addr32[3];
fhash = __ipv6_addr_jhash(faddr, ipv6_hash_secret);
fhash = __ipv6_addr_jhash(faddr, tcp_ipv6_hash_secret);
return __inet6_ehashfn(lhash, lport, fhash, fport,
inet6_ehash_secret + net_hash_mix(net));


@ -419,14 +419,6 @@ static int ip4ip6_gro_complete(struct sk_buff *skb, int nhoff)
return inet_gro_complete(skb, nhoff);
}
static struct packet_offload ipv6_packet_offload __read_mostly = {
.type = cpu_to_be16(ETH_P_IPV6),
.callbacks = {
.gso_segment = ipv6_gso_segment,
.gro_receive = ipv6_gro_receive,
.gro_complete = ipv6_gro_complete,
},
};
static struct sk_buff *sit_gso_segment(struct sk_buff *skb,
netdev_features_t features)
@ -486,7 +478,15 @@ static int __init ipv6_offload_init(void)
if (ipv6_exthdrs_offload_init() < 0)
pr_crit("%s: Cannot add EXTHDRS protocol offload\n", __func__);
dev_add_offload(&ipv6_packet_offload);
net_hotdata.ipv6_packet_offload = (struct packet_offload) {
.type = cpu_to_be16(ETH_P_IPV6),
.callbacks = {
.gso_segment = ipv6_gso_segment,
.gro_receive = ipv6_gro_receive,
.gro_complete = ipv6_gro_complete,
},
};
dev_add_offload(&net_hotdata.ipv6_packet_offload);
inet_add_offload(&sit_offload, IPPROTO_IPV6);
inet6_add_offload(&ip6ip6_offload, IPPROTO_IPV6);


@ -58,6 +58,7 @@
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/hotdata.h>
#include <net/busy_poll.h>
#include <linux/proc_fs.h>
@ -2367,11 +2368,6 @@ struct proto tcpv6_prot = {
};
EXPORT_SYMBOL_GPL(tcpv6_prot);
static const struct inet6_protocol tcpv6_protocol = {
.handler = tcp_v6_rcv,
.err_handler = tcp_v6_err,
.flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
static struct inet_protosw tcpv6_protosw = {
.type = SOCK_STREAM,
@ -2408,7 +2404,12 @@ int __init tcpv6_init(void)
{
int ret;
ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
net_hotdata.tcpv6_protocol = (struct inet6_protocol) {
.handler = tcp_v6_rcv,
.err_handler = tcp_v6_err,
.flags = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
};
ret = inet6_add_protocol(&net_hotdata.tcpv6_protocol, IPPROTO_TCP);
if (ret)
goto out;
@ -2433,7 +2434,7 @@ out_tcpv6_pernet_subsys:
out_tcpv6_protosw:
inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
inet6_del_protocol(&net_hotdata.tcpv6_protocol, IPPROTO_TCP);
goto out;
}
@ -2441,5 +2442,5 @@ void tcpv6_exit(void)
{
unregister_pernet_subsys(&tcpv6_net_ops);
inet6_unregister_protosw(&tcpv6_protosw);
inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
inet6_del_protocol(&net_hotdata.tcpv6_protocol, IPPROTO_TCP);
}


@ -66,15 +66,15 @@ static struct sk_buff *tcp6_gso_segment(struct sk_buff *skb,
return tcp_gso_segment(skb, features);
}
static const struct net_offload tcpv6_offload = {
.callbacks = {
.gso_segment = tcp6_gso_segment,
.gro_receive = tcp6_gro_receive,
.gro_complete = tcp6_gro_complete,
},
};
int __init tcpv6_offload_init(void)
{
return inet6_add_offload(&tcpv6_offload, IPPROTO_TCP);
net_hotdata.tcpv6_offload = (struct net_offload) {
.callbacks = {
.gso_segment = tcp6_gso_segment,
.gro_receive = tcp6_gro_receive,
.gro_complete = tcp6_gro_complete,
},
};
return inet6_add_offload(&net_hotdata.tcpv6_offload, IPPROTO_TCP);
}


@ -79,9 +79,6 @@ u32 udp6_ehashfn(const struct net *net,
const struct in6_addr *faddr,
const __be16 fport)
{
static u32 udp6_ehash_secret __read_mostly;
static u32 udp_ipv6_hash_secret __read_mostly;
u32 lhash, fhash;
net_get_random_once(&udp6_ehash_secret,
@ -1702,11 +1699,6 @@ int udpv6_getsockopt(struct sock *sk, int level, int optname,
return ipv6_getsockopt(sk, level, optname, optval, optlen);
}
static const struct inet6_protocol udpv6_protocol = {
.handler = udpv6_rcv,
.err_handler = udpv6_err,
.flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS
@ -1803,7 +1795,12 @@ int __init udpv6_init(void)
{
int ret;
ret = inet6_add_protocol(&udpv6_protocol, IPPROTO_UDP);
net_hotdata.udpv6_protocol = (struct inet6_protocol) {
.handler = udpv6_rcv,
.err_handler = udpv6_err,
.flags = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
};
ret = inet6_add_protocol(&net_hotdata.udpv6_protocol, IPPROTO_UDP);
if (ret)
goto out;
@ -1814,12 +1811,12 @@ out:
return ret;
out_udpv6_protocol:
inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
inet6_del_protocol(&net_hotdata.udpv6_protocol, IPPROTO_UDP);
goto out;
}
void udpv6_exit(void)
{
inet6_unregister_protosw(&udpv6_protosw);
inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
inet6_del_protocol(&net_hotdata.udpv6_protocol, IPPROTO_UDP);
}


@ -192,20 +192,19 @@ INDIRECT_CALLABLE_SCOPE int udp6_gro_complete(struct sk_buff *skb, int nhoff)
return udp_gro_complete(skb, nhoff, udp6_lib_lookup_skb);
}
static const struct net_offload udpv6_offload = {
.callbacks = {
.gso_segment = udp6_ufo_fragment,
.gro_receive = udp6_gro_receive,
.gro_complete = udp6_gro_complete,
},
};
int udpv6_offload_init(void)
int __init udpv6_offload_init(void)
{
return inet6_add_offload(&udpv6_offload, IPPROTO_UDP);
net_hotdata.udpv6_offload = (struct net_offload) {
.callbacks = {
.gso_segment = udp6_ufo_fragment,
.gro_receive = udp6_gro_receive,
.gro_complete = udp6_gro_complete,
},
};
return inet6_add_offload(&net_hotdata.udpv6_offload, IPPROTO_UDP);
}
int udpv6_offload_exit(void)
{
return inet6_del_offload(&udpv6_offload, IPPROTO_UDP);
return inet6_del_offload(&net_hotdata.udpv6_offload, IPPROTO_UDP);
}


@ -27,6 +27,7 @@
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/dst.h>
#include <net/hotdata.h>
#include <trace/events/qdisc.h>
#include <trace/events/net.h>
#include <net/xfrm.h>
@ -409,7 +410,7 @@ static inline bool qdisc_restart(struct Qdisc *q, int *packets)
void __qdisc_run(struct Qdisc *q)
{
int quota = READ_ONCE(dev_tx_weight);
int quota = READ_ONCE(net_hotdata.dev_tx_weight);
int packets;
while (qdisc_restart(q, &packets)) {


@ -67,6 +67,7 @@
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/stream_sched.h>
#include <net/rps.h>
/* Forward declarations for internal helper functions. */
static bool sctp_writeable(const struct sock *sk);


@ -10,6 +10,7 @@
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6_stubs.h>
#endif
#include <net/hotdata.h>
static void handle_nonesp(struct espintcp_ctx *ctx, struct sk_buff *skb,
struct sock *sk)
@ -169,7 +170,8 @@ int espintcp_queue_out(struct sock *sk, struct sk_buff *skb)
{
struct espintcp_ctx *ctx = espintcp_getctx(sk);
if (skb_queue_len(&ctx->out_queue) >= READ_ONCE(netdev_max_backlog))
if (skb_queue_len(&ctx->out_queue) >=
READ_ONCE(net_hotdata.max_backlog))
return -ENOBUFS;
__skb_queue_tail(&ctx->out_queue, skb);


@ -21,6 +21,7 @@
#include <net/ip_tunnels.h>
#include <net/ip6_tunnel.h>
#include <net/dst_metadata.h>
#include <net/hotdata.h>
#include "xfrm_inout.h"
@ -764,7 +765,7 @@ int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb,
trans = this_cpu_ptr(&xfrm_trans_tasklet);
if (skb_queue_len(&trans->queue) >= READ_ONCE(netdev_max_backlog))
if (skb_queue_len(&trans->queue) >= READ_ONCE(net_hotdata.max_backlog))
return -ENOBUFS;
BUILD_BUG_ON(sizeof(struct xfrm_trans_cb) > sizeof(skb->cb));