/*
 * (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2011 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/gfp.h>
#include <net/xfrm.h>
#include <linux/jhash.h>
#include <linux/rtnetlink.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <linux/netfilter/nf_nat.h>

#include "nf_internals.h"

static spinlock_t nf_nat_locks[CONNTRACK_LOCKS];

static DEFINE_MUTEX(nf_nat_proto_mutex);
static unsigned int nat_net_id __read_mostly;

static struct hlist_head *nf_nat_bysource __read_mostly;
static unsigned int nf_nat_htable_size __read_mostly;
static unsigned int nf_nat_hash_rnd __read_mostly;

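/* Bookkeeping for the NAT lookup hooks (the iptables/nft NAT chains) that
 * are run from the base hook in nf_nat_inet_fn(), kept per base hook and,
 * via nat_net_id, per network namespace and protocol family.
 */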
struct nf_nat_lookup_hook_priv {
        struct nf_hook_entries __rcu *entries;

        struct rcu_head rcu_head;
};

struct nf_nat_hooks_net {
        struct nf_hook_ops *nat_hook_ops;
        unsigned int users;
};

struct nat_net {
        struct nf_nat_hooks_net nat_proto_net[NFPROTO_NUMPROTO];
};

#ifdef CONFIG_XFRM
static void nf_nat_ipv4_decode_session(struct sk_buff *skb,
                                       const struct nf_conn *ct,
                                       enum ip_conntrack_dir dir,
                                       unsigned long statusbit,
                                       struct flowi *fl)
{
        const struct nf_conntrack_tuple *t = &ct->tuplehash[dir].tuple;
        struct flowi4 *fl4 = &fl->u.ip4;

        if (ct->status & statusbit) {
                fl4->daddr = t->dst.u3.ip;
                if (t->dst.protonum == IPPROTO_TCP ||
                    t->dst.protonum == IPPROTO_UDP ||
                    t->dst.protonum == IPPROTO_UDPLITE ||
                    t->dst.protonum == IPPROTO_DCCP ||
                    t->dst.protonum == IPPROTO_SCTP)
                        fl4->fl4_dport = t->dst.u.all;
        }

        statusbit ^= IPS_NAT_MASK;

        if (ct->status & statusbit) {
                fl4->saddr = t->src.u3.ip;
                if (t->dst.protonum == IPPROTO_TCP ||
                    t->dst.protonum == IPPROTO_UDP ||
                    t->dst.protonum == IPPROTO_UDPLITE ||
                    t->dst.protonum == IPPROTO_DCCP ||
                    t->dst.protonum == IPPROTO_SCTP)
                        fl4->fl4_sport = t->src.u.all;
        }
}

static void nf_nat_ipv6_decode_session(struct sk_buff *skb,
                                       const struct nf_conn *ct,
                                       enum ip_conntrack_dir dir,
                                       unsigned long statusbit,
                                       struct flowi *fl)
{
#if IS_ENABLED(CONFIG_IPV6)
        const struct nf_conntrack_tuple *t = &ct->tuplehash[dir].tuple;
        struct flowi6 *fl6 = &fl->u.ip6;

        if (ct->status & statusbit) {
                fl6->daddr = t->dst.u3.in6;
                if (t->dst.protonum == IPPROTO_TCP ||
                    t->dst.protonum == IPPROTO_UDP ||
                    t->dst.protonum == IPPROTO_UDPLITE ||
                    t->dst.protonum == IPPROTO_DCCP ||
                    t->dst.protonum == IPPROTO_SCTP)
                        fl6->fl6_dport = t->dst.u.all;
        }

        statusbit ^= IPS_NAT_MASK;

        if (ct->status & statusbit) {
                fl6->saddr = t->src.u3.in6;
                if (t->dst.protonum == IPPROTO_TCP ||
                    t->dst.protonum == IPPROTO_UDP ||
                    t->dst.protonum == IPPROTO_UDPLITE ||
                    t->dst.protonum == IPPROTO_DCCP ||
                    t->dst.protonum == IPPROTO_SCTP)
                        fl6->fl6_sport = t->src.u.all;
        }
#endif
}

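/* Fill in the flow structure from the NAT state recorded in the conntrack
 * entry; used for xfrm session decoding (CONFIG_XFRM only).
 */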
static void __nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl)
{
        const struct nf_conn *ct;
        enum ip_conntrack_info ctinfo;
        enum ip_conntrack_dir dir;
        unsigned long statusbit;
        u8 family;

        ct = nf_ct_get(skb, &ctinfo);
        if (ct == NULL)
                return;

        family = nf_ct_l3num(ct);
        dir = CTINFO2DIR(ctinfo);
        if (dir == IP_CT_DIR_ORIGINAL)
                statusbit = IPS_DST_NAT;
        else
                statusbit = IPS_SRC_NAT;

        switch (family) {
        case NFPROTO_IPV4:
                nf_nat_ipv4_decode_session(skb, ct, dir, statusbit, fl);
                return;
        case NFPROTO_IPV6:
                nf_nat_ipv6_decode_session(skb, ct, dir, statusbit, fl);
                return;
        }
}

int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family)
{
        struct flowi fl;
        unsigned int hh_len;
        struct dst_entry *dst;
        struct sock *sk = skb->sk;
        int err;

        err = xfrm_decode_session(skb, &fl, family);
        if (err < 0)
                return err;

        dst = skb_dst(skb);
        if (dst->xfrm)
                dst = ((struct xfrm_dst *)dst)->route;
        if (!dst_hold_safe(dst))
                return -EHOSTUNREACH;

        if (sk && !net_eq(net, sock_net(sk)))
                sk = NULL;

        dst = xfrm_lookup(net, dst, &fl, sk, 0);
        if (IS_ERR(dst))
                return PTR_ERR(dst);

        skb_dst_drop(skb);
        skb_dst_set(skb, dst);

        /* Change in oif may mean change in hh_len. */
        hh_len = skb_dst(skb)->dev->hard_header_len;
        if (skb_headroom(skb) < hh_len &&
            pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC))
                return -ENOMEM;
        return 0;
}
EXPORT_SYMBOL(nf_xfrm_me_harder);
#endif /* CONFIG_XFRM */

/* We keep an extra hash for each conntrack, for fast searching. */
static unsigned int
hash_by_src(const struct net *n, const struct nf_conntrack_tuple *tuple)
{
        unsigned int hash;

        get_random_once(&nf_nat_hash_rnd, sizeof(nf_nat_hash_rnd));

        /* Original src, to ensure we map it consistently if poss. */
        hash = jhash2((u32 *)&tuple->src, sizeof(tuple->src) / sizeof(u32),
                      tuple->dst.protonum ^ nf_nat_hash_rnd ^ net_hash_mix(n));

        return reciprocal_scale(hash, nf_nat_htable_size);
}

/* Is this tuple already taken? (not by us) */
static int
nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
                  const struct nf_conn *ignored_conntrack)
{
        /* Conntrack tracking doesn't keep track of outgoing tuples; only
         * incoming ones.  NAT means they don't have a fixed mapping,
         * so we invert the tuple and look for the incoming reply.
         *
         * We could keep a separate hash if this proves too slow.
         */
        struct nf_conntrack_tuple reply;

        nf_ct_invert_tuple(&reply, tuple);
        return nf_conntrack_tuple_taken(&reply, ignored_conntrack);
}

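/* Is the (original) source address of the tuple within the address bounds
 * of the range?  Handles both IPv4 and IPv6.
 */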
static bool nf_nat_inet_in_range(const struct nf_conntrack_tuple *t,
                                 const struct nf_nat_range2 *range)
{
        if (t->src.l3num == NFPROTO_IPV4)
                return ntohl(t->src.u3.ip) >= ntohl(range->min_addr.ip) &&
                       ntohl(t->src.u3.ip) <= ntohl(range->max_addr.ip);

        return ipv6_addr_cmp(&t->src.u3.in6, &range->min_addr.in6) >= 0 &&
               ipv6_addr_cmp(&t->src.u3.in6, &range->max_addr.in6) <= 0;
}

/* Is the manipulable part of the tuple between min and max incl? */
static bool l4proto_in_range(const struct nf_conntrack_tuple *tuple,
                             enum nf_nat_manip_type maniptype,
                             const union nf_conntrack_man_proto *min,
                             const union nf_conntrack_man_proto *max)
{
        __be16 port;

        switch (tuple->dst.protonum) {
        case IPPROTO_ICMP:
        case IPPROTO_ICMPV6:
                return ntohs(tuple->src.u.icmp.id) >= ntohs(min->icmp.id) &&
                       ntohs(tuple->src.u.icmp.id) <= ntohs(max->icmp.id);
        case IPPROTO_GRE: /* all fall through */
        case IPPROTO_TCP:
        case IPPROTO_UDP:
        case IPPROTO_UDPLITE:
        case IPPROTO_DCCP:
        case IPPROTO_SCTP:
                if (maniptype == NF_NAT_MANIP_SRC)
                        port = tuple->src.u.all;
                else
                        port = tuple->dst.u.all;

                return ntohs(port) >= ntohs(min->all) &&
                       ntohs(port) <= ntohs(max->all);
        default:
                return true;
        }
}

/* If we source map this tuple so reply looks like reply_tuple, will
 * that meet the constraints of range?
 */
static int in_range(const struct nf_conntrack_tuple *tuple,
                    const struct nf_nat_range2 *range)
{
        /* If we are supposed to map IPs, then we must be in the
         * range specified, otherwise let this drag us onto a new src IP.
         */
        if (range->flags & NF_NAT_RANGE_MAP_IPS &&
            !nf_nat_inet_in_range(tuple, range))
                return 0;

        if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED))
                return 1;

        return l4proto_in_range(tuple, NF_NAT_MANIP_SRC,
                                &range->min_proto, &range->max_proto);
}

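/* True if @tuple has the same source address, protocol and source port/id
 * as the original-direction tuple of @ct.
 */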
static inline int
same_src(const struct nf_conn *ct,
         const struct nf_conntrack_tuple *tuple)
{
        const struct nf_conntrack_tuple *t;

        t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
        return (t->dst.protonum == tuple->dst.protonum &&
                nf_inet_addr_cmp(&t->src.u3, &tuple->src.u3) &&
                t->src.u.all == tuple->src.u.all);
}

/* Only called for SRC manip */
static int
find_appropriate_src(struct net *net,
                     const struct nf_conntrack_zone *zone,
                     const struct nf_conntrack_tuple *tuple,
                     struct nf_conntrack_tuple *result,
                     const struct nf_nat_range2 *range)
{
        unsigned int h = hash_by_src(net, tuple);
        const struct nf_conn *ct;

        hlist_for_each_entry_rcu(ct, &nf_nat_bysource[h], nat_bysource) {
                if (same_src(ct, tuple) &&
                    net_eq(net, nf_ct_net(ct)) &&
                    nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL)) {
                        /* Copy source part from reply tuple. */
                        nf_ct_invert_tuple(result,
                                           &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
                        result->dst = tuple->dst;

                        if (in_range(result, range))
                                return 1;
                }
        }
        return 0;
}

/* For [FUTURE] fragmentation handling, we want the least-used
 * src-ip/dst-ip/proto triple.  Fairness doesn't come into it.  Thus
 * if the range specifies 1.2.3.4 ports 10000-10005 and 1.2.3.5 ports
 * 1-65535, we don't do pro-rata allocation based on ports; we choose
 * the ip with the lowest src-ip/dst-ip/proto usage.
 */
static void
find_best_ips_proto(const struct nf_conntrack_zone *zone,
                    struct nf_conntrack_tuple *tuple,
                    const struct nf_nat_range2 *range,
                    const struct nf_conn *ct,
                    enum nf_nat_manip_type maniptype)
{
        union nf_inet_addr *var_ipp;
        unsigned int i, max;
        /* Host order */
        u32 minip, maxip, j, dist;
        bool full_range;

        /* No IP mapping?  Do nothing. */
        if (!(range->flags & NF_NAT_RANGE_MAP_IPS))
                return;

        if (maniptype == NF_NAT_MANIP_SRC)
                var_ipp = &tuple->src.u3;
        else
                var_ipp = &tuple->dst.u3;

        /* Fast path: only one choice. */
        if (nf_inet_addr_cmp(&range->min_addr, &range->max_addr)) {
                *var_ipp = range->min_addr;
                return;
        }

        if (nf_ct_l3num(ct) == NFPROTO_IPV4)
                max = sizeof(var_ipp->ip) / sizeof(u32) - 1;
        else
                max = sizeof(var_ipp->ip6) / sizeof(u32) - 1;

        /* Hashing source and destination IPs gives a fairly even
         * spread in practice (if there are a small number of IPs
         * involved, there usually aren't that many connections
         * anyway).  The consistency means that servers see the same
         * client coming from the same IP (some Internet Banking sites
         * like this), even across reboots.
         */
        j = jhash2((u32 *)&tuple->src.u3, sizeof(tuple->src.u3) / sizeof(u32),
                   range->flags & NF_NAT_RANGE_PERSISTENT ?
                        0 : (__force u32)tuple->dst.u3.all[max] ^ zone->id);

        full_range = false;
        for (i = 0; i <= max; i++) {
                /* If first bytes of the address are at the maximum, use the
                 * distance. Otherwise use the full range.
                 */
                if (!full_range) {
                        minip = ntohl((__force __be32)range->min_addr.all[i]);
                        maxip = ntohl((__force __be32)range->max_addr.all[i]);
                        dist  = maxip - minip + 1;
                } else {
                        minip = 0;
                        dist  = ~0;
                }

                var_ipp->all[i] = (__force __u32)
                        htonl(minip + reciprocal_scale(j, dist));
                if (var_ipp->all[i] != range->max_addr.all[i])
                        full_range = true;

                if (!(range->flags & NF_NAT_RANGE_PERSISTENT))
                        j ^= (__force u32)tuple->dst.u3.all[i];
        }
}

/* Alter the per-proto part of the tuple (depending on maniptype), to
 * give a unique tuple in the given range if possible.
 *
 * Per-protocol part of tuple is initialized to the incoming packet.
 */
static void nf_nat_l4proto_unique_tuple(struct nf_conntrack_tuple *tuple,
                                        const struct nf_nat_range2 *range,
                                        enum nf_nat_manip_type maniptype,
                                        const struct nf_conn *ct)
{
        unsigned int range_size, min, max, i, attempts;
        __be16 *keyptr;
        u16 off;
        static const unsigned int max_attempts = 128;

        switch (tuple->dst.protonum) {
        case IPPROTO_ICMP: /* fallthrough */
        case IPPROTO_ICMPV6:
                /* id is same for either direction... */
                keyptr = &tuple->src.u.icmp.id;
                min = ntohs(range->min_proto.icmp.id);
                range_size = ntohs(range->max_proto.icmp.id) -
                             ntohs(range->min_proto.icmp.id) + 1;
                goto find_free_id;
#if IS_ENABLED(CONFIG_NF_CT_PROTO_GRE)
        case IPPROTO_GRE:
                /* If there is no master conntrack we are not PPTP,
                   do not change tuples */
                if (!ct->master)
                        return;

                if (maniptype == NF_NAT_MANIP_SRC)
                        keyptr = &tuple->src.u.gre.key;
                else
                        keyptr = &tuple->dst.u.gre.key;

                if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) {
                        min = 1;
                        range_size = 65535;
                } else {
                        min = ntohs(range->min_proto.gre.key);
                        range_size = ntohs(range->max_proto.gre.key) - min + 1;
                }
                goto find_free_id;
#endif
        case IPPROTO_UDP: /* fallthrough */
        case IPPROTO_UDPLITE: /* fallthrough */
        case IPPROTO_TCP: /* fallthrough */
        case IPPROTO_SCTP: /* fallthrough */
        case IPPROTO_DCCP: /* fallthrough */
                if (maniptype == NF_NAT_MANIP_SRC)
                        keyptr = &tuple->src.u.all;
                else
                        keyptr = &tuple->dst.u.all;

                break;
        default:
                return;
        }

        /* If no range specified... */
        if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) {
                /* If it's dst rewrite, can't change port */
                if (maniptype == NF_NAT_MANIP_DST)
                        return;

                if (ntohs(*keyptr) < 1024) {
                        /* Loose convention: >> 512 is credential passing */
                        if (ntohs(*keyptr) < 512) {
                                min = 1;
                                range_size = 511 - min + 1;
                        } else {
                                min = 600;
                                range_size = 1023 - min + 1;
                        }
                } else {
                        min = 1024;
                        range_size = 65535 - 1024 + 1;
                }
        } else {
                min = ntohs(range->min_proto.all);
                max = ntohs(range->max_proto.all);
                if (unlikely(max < min))
                        swap(max, min);
                range_size = max - min + 1;
        }

find_free_id:
        if (range->flags & NF_NAT_RANGE_PROTO_OFFSET)
                off = (ntohs(*keyptr) - ntohs(range->base_proto.all));
        else
                off = prandom_u32();

        attempts = range_size;
        if (attempts > max_attempts)
                attempts = max_attempts;

        /* We are in softirq; doing a search of the entire range risks
         * soft lockup when all tuples are already used.
         *
         * If we can't find any free port from first offset, pick a new
         * one and try again, with ever smaller search window.
         */
another_round:
        for (i = 0; i < attempts; i++, off++) {
                *keyptr = htons(min + off % range_size);
                if (!nf_nat_used_tuple(tuple, ct))
                        return;
        }

        if (attempts >= range_size || attempts < 16)
                return;
        attempts /= 2;
        off = prandom_u32();
        goto another_round;
}

/* Manipulate the tuple into the range given. For NF_INET_POST_ROUTING,
 * we change the source to map into the range. For NF_INET_PRE_ROUTING
 * and NF_INET_LOCAL_OUT, we change the destination to map into the
 * range. It might not be possible to get a unique tuple, but we try.
 * At worst (or if we race), we will end up with a final duplicate in
 * __ip_conntrack_confirm and drop the packet. */
static void
get_unique_tuple(struct nf_conntrack_tuple *tuple,
                 const struct nf_conntrack_tuple *orig_tuple,
                 const struct nf_nat_range2 *range,
                 struct nf_conn *ct,
                 enum nf_nat_manip_type maniptype)
{
        const struct nf_conntrack_zone *zone;
        struct net *net = nf_ct_net(ct);

        zone = nf_ct_zone(ct);

        /* 1) If this srcip/proto/src-proto-part is currently mapped,
         * and that same mapping gives a unique tuple within the given
         * range, use that.
         *
         * This is only required for source (ie. NAT/masq) mappings.
         * So far, we don't do local source mappings, so multiple
         * manips not an issue.
         */
        if (maniptype == NF_NAT_MANIP_SRC &&
            !(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
                /* try the original tuple first */
                if (in_range(orig_tuple, range)) {
                        if (!nf_nat_used_tuple(orig_tuple, ct)) {
                                *tuple = *orig_tuple;
                                return;
                        }
                } else if (find_appropriate_src(net, zone,
                                                orig_tuple, tuple, range)) {
                        pr_debug("get_unique_tuple: Found current src map\n");
                        if (!nf_nat_used_tuple(tuple, ct))
                                return;
                }
        }

        /* 2) Select the least-used IP/proto combination in the given range */
        *tuple = *orig_tuple;
        find_best_ips_proto(zone, tuple, range, ct, maniptype);

        /* 3) The per-protocol part of the manip is made to map into
         * the range to make a unique tuple.
         */

        /* Only bother mapping if it's not already in range and unique */
        if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
                if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
                        if (!(range->flags & NF_NAT_RANGE_PROTO_OFFSET) &&
                            l4proto_in_range(tuple, maniptype,
                                             &range->min_proto,
                                             &range->max_proto) &&
                            (range->min_proto.all == range->max_proto.all ||
                             !nf_nat_used_tuple(tuple, ct)))
                                return;
                } else if (!nf_nat_used_tuple(tuple, ct)) {
                        return;
                }
        }

        /* Last chance: get protocol to try to obtain unique tuple. */
        nf_nat_l4proto_unique_tuple(tuple, range, maniptype, ct);
}

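/* Add the NAT extension to an unconfirmed conntrack, or return the existing
 * one.  Returns NULL if the conntrack is already confirmed and carries no
 * NAT extension.
 */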
struct nf_conn_nat *nf_ct_nat_ext_add(struct nf_conn *ct)
{
        struct nf_conn_nat *nat = nfct_nat(ct);
        if (nat)
                return nat;

        if (!nf_ct_is_confirmed(ct))
                nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);

        return nat;
}
EXPORT_SYMBOL_GPL(nf_ct_nat_ext_add);

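/* Set up a NAT binding for @ct: pick a unique tuple within @range for the
 * given manipulation type, update the conntrack reply tuple accordingly
 * and, for source NAT, insert the conntrack into the bysource hash.
 */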
unsigned int
nf_nat_setup_info(struct nf_conn *ct,
                  const struct nf_nat_range2 *range,
                  enum nf_nat_manip_type maniptype)
{
        struct net *net = nf_ct_net(ct);
        struct nf_conntrack_tuple curr_tuple, new_tuple;

        /* Can't setup nat info for confirmed ct. */
        if (nf_ct_is_confirmed(ct))
                return NF_ACCEPT;

        WARN_ON(maniptype != NF_NAT_MANIP_SRC &&
                maniptype != NF_NAT_MANIP_DST);

        if (WARN_ON(nf_nat_initialized(ct, maniptype)))
                return NF_DROP;

        /* What we've got will look like inverse of reply. Normally
         * this is what is in the conntrack, except for prior
         * manipulations (future optimization: if num_manips == 0,
         * orig_tp = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
         */
        nf_ct_invert_tuple(&curr_tuple,
                           &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

        get_unique_tuple(&new_tuple, &curr_tuple, range, ct, maniptype);

        if (!nf_ct_tuple_equal(&new_tuple, &curr_tuple)) {
                struct nf_conntrack_tuple reply;

                /* Alter conntrack table so will recognize replies. */
                nf_ct_invert_tuple(&reply, &new_tuple);
                nf_conntrack_alter_reply(ct, &reply);

                /* Non-atomic: we own this at the moment. */
                if (maniptype == NF_NAT_MANIP_SRC)
                        ct->status |= IPS_SRC_NAT;
                else
                        ct->status |= IPS_DST_NAT;

                if (nfct_help(ct) && !nfct_seqadj(ct))
                        if (!nfct_seqadj_ext_add(ct))
                                return NF_DROP;
        }

        if (maniptype == NF_NAT_MANIP_SRC) {
                unsigned int srchash;
                spinlock_t *lock;

                srchash = hash_by_src(net,
                                      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
                lock = &nf_nat_locks[srchash % CONNTRACK_LOCKS];
                spin_lock_bh(lock);
                hlist_add_head_rcu(&ct->nat_bysource,
                                   &nf_nat_bysource[srchash]);
                spin_unlock_bh(lock);
        }

        /* It's done. */
        if (maniptype == NF_NAT_MANIP_DST)
                ct->status |= IPS_DST_NAT_DONE;
        else
                ct->status |= IPS_SRC_NAT_DONE;

        return NF_ACCEPT;
}
EXPORT_SYMBOL(nf_nat_setup_info);

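/* Example (informal sketch, not part of this file): a masquerade-style
 * caller typically builds an nf_nat_range2 around the address of the
 * output interface and asks for a source manipulation:
 *
 *      struct nf_nat_range2 newrange = {
 *              .flags     = range->flags | NF_NAT_RANGE_MAP_IPS,
 *              .min_addr  = { .ip = newsrc },
 *              .max_addr  = { .ip = newsrc },
 *              .min_proto = range->min_proto,
 *              .max_proto = range->max_proto,
 *      };
 *
 *      return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC);
 *
 * where "newsrc" is the interface address chosen by the caller.
 */
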
static unsigned int
__nf_nat_alloc_null_binding(struct nf_conn *ct, enum nf_nat_manip_type manip)
{
        /* Force range to this IP; let proto decide mapping for
         * per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED).
         * Use reply in case it's already been mangled (eg local packet).
         */
        union nf_inet_addr ip =
                (manip == NF_NAT_MANIP_SRC ?
                ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3 :
                ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3);
        struct nf_nat_range2 range = {
                .flags          = NF_NAT_RANGE_MAP_IPS,
                .min_addr       = ip,
                .max_addr       = ip,
        };
        return nf_nat_setup_info(ct, &range, manip);
}

unsigned int
nf_nat_alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
{
        return __nf_nat_alloc_null_binding(ct, HOOK2MANIP(hooknum));
}
EXPORT_SYMBOL_GPL(nf_nat_alloc_null_binding);

/* Do packet manipulations according to nf_nat_setup_info. */
unsigned int nf_nat_packet(struct nf_conn *ct,
                           enum ip_conntrack_info ctinfo,
                           unsigned int hooknum,
                           struct sk_buff *skb)
{
        enum nf_nat_manip_type mtype = HOOK2MANIP(hooknum);
        enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
        unsigned int verdict = NF_ACCEPT;
        unsigned long statusbit;

        if (mtype == NF_NAT_MANIP_SRC)
                statusbit = IPS_SRC_NAT;
        else
                statusbit = IPS_DST_NAT;

        /* Invert if this is reply dir. */
        if (dir == IP_CT_DIR_REPLY)
                statusbit ^= IPS_NAT_MASK;

        /* Non-atomic: these bits don't change. */
        if (ct->status & statusbit)
                verdict = nf_nat_manip_pkt(skb, ct, mtype, dir);

        return verdict;
}
EXPORT_SYMBOL_GPL(nf_nat_packet);

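/* Shared IPv4/IPv6 NAT base hook: for new connections, run the registered
 * NAT lookup chains (falling back to a null binding if none of them creates
 * a mapping), then perform the packet rewrite via nf_nat_packet().
 */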
unsigned int
nf_nat_inet_fn(void *priv, struct sk_buff *skb,
               const struct nf_hook_state *state)
{
        struct nf_conn *ct;
        enum ip_conntrack_info ctinfo;
        struct nf_conn_nat *nat;
        /* maniptype == SRC for postrouting. */
        enum nf_nat_manip_type maniptype = HOOK2MANIP(state->hook);

        ct = nf_ct_get(skb, &ctinfo);
        /* Can't track?  It's not due to stress, or conntrack would
         * have dropped it.  Hence it's the user's responsibility to
         * packet filter it out, or implement conntrack/NAT for that
         * protocol. 8) --RR
         */
        if (!ct)
                return NF_ACCEPT;

        nat = nfct_nat(ct);

        switch (ctinfo) {
        case IP_CT_RELATED:
        case IP_CT_RELATED_REPLY:
                /* Only ICMPs can be IP_CT_IS_REPLY.  Fallthrough */
        case IP_CT_NEW:
                /* Seen it before?  This can happen for loopback, retrans,
                 * or local packets.
                 */
                if (!nf_nat_initialized(ct, maniptype)) {
                        struct nf_nat_lookup_hook_priv *lpriv = priv;
                        struct nf_hook_entries *e = rcu_dereference(lpriv->entries);
                        unsigned int ret;
                        int i;

                        if (!e)
                                goto null_bind;

                        for (i = 0; i < e->num_hook_entries; i++) {
                                ret = e->hooks[i].hook(e->hooks[i].priv, skb,
                                                       state);
                                if (ret != NF_ACCEPT)
                                        return ret;
                                if (nf_nat_initialized(ct, maniptype))
                                        goto do_nat;
                        }
null_bind:
                        ret = nf_nat_alloc_null_binding(ct, state->hook);
                        if (ret != NF_ACCEPT)
                                return ret;
                } else {
                        pr_debug("Already setup manip %s for ct %p (status bits 0x%lx)\n",
                                 maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST",
                                 ct, ct->status);
                        if (nf_nat_oif_changed(state->hook, ctinfo, nat,
                                               state->out))
                                goto oif_changed;
                }
                break;
        default:
                /* ESTABLISHED */
                WARN_ON(ctinfo != IP_CT_ESTABLISHED &&
                        ctinfo != IP_CT_ESTABLISHED_REPLY);
                if (nf_nat_oif_changed(state->hook, ctinfo, nat, state->out))
                        goto oif_changed;
        }
do_nat:
        return nf_nat_packet(ct, ctinfo, state->hook, skb);

oif_changed:
        nf_ct_kill_acct(ct, ctinfo, skb);
        return NF_DROP;
}
EXPORT_SYMBOL_GPL(nf_nat_inet_fn);

struct nf_nat_proto_clean {
        u8      l3proto;
        u8      l4proto;
};

/* kill conntracks with affected NAT section */
static int nf_nat_proto_remove(struct nf_conn *i, void *data)
{
        const struct nf_nat_proto_clean *clean = data;

        if ((clean->l3proto && nf_ct_l3num(i) != clean->l3proto) ||
            (clean->l4proto && nf_ct_protonum(i) != clean->l4proto))
                return 0;

        return i->status & IPS_NAT_MASK ? 1 : 0;
}

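/* Remove the conntrack from the bysource hash, under the per-bucket lock. */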
static void __nf_nat_cleanup_conntrack(struct nf_conn *ct)
{
        unsigned int h;

        h = hash_by_src(nf_ct_net(ct), &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
        spin_lock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]);
        hlist_del_rcu(&ct->nat_bysource);
        spin_unlock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]);
}

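/* Called for each conntrack on module removal: kill the entries that match
 * the affected protocol; for the others, drop their bysource hash entry so
 * the soon-to-be-freed table is no longer referenced.
 */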
static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
{
        if (nf_nat_proto_remove(ct, data))
                return 1;

        /* This module is being removed and conntrack has nat null binding.
         * Remove it from bysource hash, as the table will be freed soon.
         *
         * Else, when the conntrack is destroyed, nf_nat_cleanup_conntrack()
         * will delete entry from already-freed table.
         */
        if (test_and_clear_bit(IPS_SRC_NAT_DONE_BIT, &ct->status))
                __nf_nat_cleanup_conntrack(ct);

        /* don't delete conntrack.  Although that would make things a lot
         * simpler, we'd end up flushing all conntracks on nat rmmod.
         */
        return 0;
}

/* No one using conntrack by the time this called. */
static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
{
        if (ct->status & IPS_SRC_NAT_DONE)
                __nf_nat_cleanup_conntrack(ct);
}

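/* Conntrack extension descriptor for the NAT extension. */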
static struct nf_ct_ext_type nat_extend __read_mostly = {
|
2007-07-08 05:26:16 +00:00
|
|
|
.len = sizeof(struct nf_conn_nat),
|
|
|
|
.align = __alignof__(struct nf_conn_nat),
|
|
|
|
.destroy = nf_nat_cleanup_conntrack,
|
|
|
|
.id = NF_CT_EXT_NAT,
|
2007-07-08 05:24:28 +00:00
|
|
|
};
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>

static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = {
	[CTA_PROTONAT_PORT_MIN]	= { .type = NLA_U16 },
	[CTA_PROTONAT_PORT_MAX]	= { .type = NLA_U16 },
};
static int nf_nat_l4proto_nlattr_to_range(struct nlattr *tb[],
					  struct nf_nat_range2 *range)
{
	if (tb[CTA_PROTONAT_PORT_MIN]) {
		range->min_proto.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MIN]);
		range->max_proto.all = range->min_proto.all;
		range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
	}
	if (tb[CTA_PROTONAT_PORT_MAX]) {
		range->max_proto.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MAX]);
		range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
	}
	return 0;
}
static int nfnetlink_parse_nat_proto(struct nlattr *attr,
				     const struct nf_conn *ct,
				     struct nf_nat_range2 *range)
{
	struct nlattr *tb[CTA_PROTONAT_MAX+1];
	int err;

	err = nla_parse_nested(tb, CTA_PROTONAT_MAX, attr,
			       protonat_nla_policy, NULL);
	if (err < 0)
		return err;

	return nf_nat_l4proto_nlattr_to_range(tb, range);
}
static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = {
	[CTA_NAT_V4_MINIP]	= { .type = NLA_U32 },
	[CTA_NAT_V4_MAXIP]	= { .type = NLA_U32 },
	[CTA_NAT_V6_MINIP]	= { .len = sizeof(struct in6_addr) },
	[CTA_NAT_V6_MAXIP]	= { .len = sizeof(struct in6_addr) },
	[CTA_NAT_PROTO]		= { .type = NLA_NESTED },
};
static int nf_nat_ipv4_nlattr_to_range(struct nlattr *tb[],
					struct nf_nat_range2 *range)
{
	if (tb[CTA_NAT_V4_MINIP]) {
		range->min_addr.ip = nla_get_be32(tb[CTA_NAT_V4_MINIP]);
		range->flags |= NF_NAT_RANGE_MAP_IPS;
	}

	if (tb[CTA_NAT_V4_MAXIP])
		range->max_addr.ip = nla_get_be32(tb[CTA_NAT_V4_MAXIP]);
	else
		range->max_addr.ip = range->min_addr.ip;

	return 0;
}
static int nf_nat_ipv6_nlattr_to_range(struct nlattr *tb[],
					struct nf_nat_range2 *range)
{
	if (tb[CTA_NAT_V6_MINIP]) {
		nla_memcpy(&range->min_addr.ip6, tb[CTA_NAT_V6_MINIP],
			   sizeof(struct in6_addr));
		range->flags |= NF_NAT_RANGE_MAP_IPS;
	}

	if (tb[CTA_NAT_V6_MAXIP])
		nla_memcpy(&range->max_addr.ip6, tb[CTA_NAT_V6_MAXIP],
			   sizeof(struct in6_addr));
	else
		range->max_addr = range->min_addr;

	return 0;
}
static int
nfnetlink_parse_nat(const struct nlattr *nat,
		    const struct nf_conn *ct, struct nf_nat_range2 *range)
{
	struct nlattr *tb[CTA_NAT_MAX+1];
	int err;

	memset(range, 0, sizeof(*range));

	err = nla_parse_nested(tb, CTA_NAT_MAX, nat, nat_nla_policy, NULL);
	if (err < 0)
		return err;

	switch (nf_ct_l3num(ct)) {
	case NFPROTO_IPV4:
		err = nf_nat_ipv4_nlattr_to_range(tb, range);
		break;
	case NFPROTO_IPV6:
		err = nf_nat_ipv6_nlattr_to_range(tb, range);
		break;
	default:
		err = -EPROTONOSUPPORT;
		break;
	}

	if (err)
		return err;

	if (!tb[CTA_NAT_PROTO])
		return 0;

	return nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range);
}

netfilter: ctnetlink: force null nat binding on insert
Quoting Andrey Vagin:
When a conntrack is created by the kernel, it is initialized (the
IPS_{DST,SRC}_NAT_DONE_BIT bits are set in nf_nat_setup_info) and only
then added to the hashes (__nf_conntrack_hash_insert), so one conntrack
cannot be initialized from several threads concurrently.
ctnetlink can add an uninitialized conntrack (without
IPS_{DST,SRC}_NAT_DONE_BIT) to the hashes; several threads can then
look up this conntrack and start initializing it concurrently. This is
dangerous, because a BUG can be triggered from nf_nat_setup_info.
Fix this race by always setting up nat, even if no CTA_NAT_ attribute
was requested, before inserting the ct into the hash table. In the
absence of a CTA_NAT_ attribute, a null binding is created.
This alters current behaviour: before this patch, the first packet
matching the newly injected conntrack would be run through the nat
table, since nf_nat_initialized() returns false. IOW, this forces
ctnetlink users to specify the desired nat transformation at ct
creation time.
Thanks to Florian Westphal; this patch is based on his original
patch to address this problem, including this patch description.
Reported-By: Andrey Vagin <avagin@gmail.com>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Acked-by: Florian Westphal <fw@strlen.de>
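As a rough illustration of the invariant the changelog above describes, here is a small, hypothetical user-space model (struct model_ct, model_setup_nat and the MODEL_*_NAT_DONE flag values are invented for the example): NAT is set up, and the DONE bits are set, before the entry would become visible in the hash, even when no CTA_NAT attribute was supplied.

#include <stdbool.h>
#include <stdio.h>

/* Invented flag values, for illustration only. */
#define MODEL_SRC_NAT_DONE	(1UL << 0)
#define MODEL_DST_NAT_DONE	(1UL << 1)

struct model_ct {
	unsigned long status;
};

/* Models the setup path: with a CTA_NAT attribute the parsed range
 * would be applied; without one a null (identity) binding is
 * installed.  Either way the DONE bits end up set. */
static void model_setup_nat(struct model_ct *ct, bool have_nat_attr)
{
	(void)have_nat_attr;
	ct->status |= MODEL_SRC_NAT_DONE | MODEL_DST_NAT_DONE;
}

int main(void)
{
	struct model_ct ct = { 0 };

	model_setup_nat(&ct, false);	/* no CTA_NAT given: null binding */
	/* Only now would the entry be inserted into the conntrack hash,
	 * so concurrent lookups never see a half-initialized binding. */
	printf("status=0x%lx\n", ct.status);
	return 0;
}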
/* This function is called under rcu_read_lock() */
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
			  enum nf_nat_manip_type manip,
			  const struct nlattr *attr)
{
	struct nf_nat_range2 range;
	int err;

	/* Should not happen, restricted to creating new conntracks
	 * via ctnetlink.
	 */
	if (WARN_ON_ONCE(nf_nat_initialized(ct, manip)))
		return -EEXIST;

	/* No NAT information has been passed, allocate the null-binding */
	if (attr == NULL)
		return __nf_nat_alloc_null_binding(ct, manip) == NF_DROP ? -ENOMEM : 0;

	err = nfnetlink_parse_nat(attr, ct, &range);
	if (err < 0)
		return err;

	return nf_nat_setup_info(ct, &range, manip) == NF_DROP ? -ENOMEM : 0;
}
#else
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
			  enum nf_nat_manip_type manip,
			  const struct nlattr *attr)
{
	return -EOPNOTSUPP;
}
#endif
static struct nf_ct_helper_expectfn follow_master_nat = {
	.name		= "nat-follow-master",
	.expectfn	= nf_nat_follow_master,
};
int nf_nat_register_fn(struct net *net, const struct nf_hook_ops *ops,
		       const struct nf_hook_ops *orig_nat_ops,
		       unsigned int ops_count)
{
	struct nat_net *nat_net = net_generic(net, nat_net_id);
	struct nf_nat_hooks_net *nat_proto_net;
	struct nf_nat_lookup_hook_priv *priv;
	unsigned int hooknum = ops->hooknum;
	struct nf_hook_ops *nat_ops;
	int i, ret;

	if (WARN_ON_ONCE(ops->pf >= ARRAY_SIZE(nat_net->nat_proto_net)))
		return -EINVAL;

	nat_proto_net = &nat_net->nat_proto_net[ops->pf];

	for (i = 0; i < ops_count; i++) {
		if (WARN_ON(orig_nat_ops[i].pf != ops->pf))
			return -EINVAL;
		if (orig_nat_ops[i].hooknum == hooknum) {
			hooknum = i;
			break;
		}
	}

	if (WARN_ON_ONCE(i == ops_count))
		return -EINVAL;

	mutex_lock(&nf_nat_proto_mutex);
	if (!nat_proto_net->nat_hook_ops) {
		WARN_ON(nat_proto_net->users != 0);

		nat_ops = kmemdup(orig_nat_ops, sizeof(*orig_nat_ops) * ops_count, GFP_KERNEL);
		if (!nat_ops) {
			mutex_unlock(&nf_nat_proto_mutex);
			return -ENOMEM;
		}

		for (i = 0; i < ops_count; i++) {
			priv = kzalloc(sizeof(*priv), GFP_KERNEL);
			if (priv) {
				nat_ops[i].priv = priv;
				continue;
			}
			mutex_unlock(&nf_nat_proto_mutex);
			while (i)
				kfree(nat_ops[--i].priv);
			kfree(nat_ops);
			return -ENOMEM;
		}

		ret = nf_register_net_hooks(net, nat_ops, ops_count);
		if (ret < 0) {
			mutex_unlock(&nf_nat_proto_mutex);
			for (i = 0; i < ops_count; i++)
				kfree(nat_ops[i].priv);
			kfree(nat_ops);
			return ret;
		}

		nat_proto_net->nat_hook_ops = nat_ops;
	}

	nat_ops = nat_proto_net->nat_hook_ops;
	priv = nat_ops[hooknum].priv;
	if (WARN_ON_ONCE(!priv)) {
		mutex_unlock(&nf_nat_proto_mutex);
		return -EOPNOTSUPP;
	}

	ret = nf_hook_entries_insert_raw(&priv->entries, ops);
	if (ret == 0)
		nat_proto_net->users++;

	mutex_unlock(&nf_nat_proto_mutex);
	return ret;
}
void nf_nat_unregister_fn(struct net *net, const struct nf_hook_ops *ops,
			  unsigned int ops_count)
{
	struct nat_net *nat_net = net_generic(net, nat_net_id);
	struct nf_nat_hooks_net *nat_proto_net;
	struct nf_nat_lookup_hook_priv *priv;
	struct nf_hook_ops *nat_ops;
	int hooknum = ops->hooknum;
	int i;

	if (ops->pf >= ARRAY_SIZE(nat_net->nat_proto_net))
		return;

	nat_proto_net = &nat_net->nat_proto_net[ops->pf];

	mutex_lock(&nf_nat_proto_mutex);
	if (WARN_ON(nat_proto_net->users == 0))
		goto unlock;

	nat_proto_net->users--;

	nat_ops = nat_proto_net->nat_hook_ops;
	for (i = 0; i < ops_count; i++) {
		if (nat_ops[i].hooknum == hooknum) {
			hooknum = i;
			break;
		}
	}
	if (WARN_ON_ONCE(i == ops_count))
		goto unlock;
	priv = nat_ops[hooknum].priv;
	nf_hook_entries_delete_raw(&priv->entries, ops);

	if (nat_proto_net->users == 0) {
		nf_unregister_net_hooks(net, nat_ops, ops_count);

		for (i = 0; i < ops_count; i++) {
			priv = nat_ops[i].priv;
			kfree_rcu(priv, rcu_head);
		}

		nat_proto_net->nat_hook_ops = NULL;
		kfree(nat_ops);
	}
unlock:
	mutex_unlock(&nf_nat_proto_mutex);
}
static struct pernet_operations nat_net_ops = {
	.id	= &nat_net_id,
	.size	= sizeof(struct nat_net),
};
static struct nf_nat_hook nat_hook = {
	.parse_nat_setup	= nfnetlink_parse_nat_setup,
#ifdef CONFIG_XFRM
	.decode_session		= __nf_nat_decode_session,
#endif
	.manip_pkt		= nf_nat_manip_pkt,
};
static int __init nf_nat_init(void)
{
	int ret, i;

	/* Leave them the same for the moment. */
	nf_nat_htable_size = nf_conntrack_htable_size;
	if (nf_nat_htable_size < CONNTRACK_LOCKS)
		nf_nat_htable_size = CONNTRACK_LOCKS;

	nf_nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size, 0);
	if (!nf_nat_bysource)
		return -ENOMEM;

	ret = nf_ct_extend_register(&nat_extend);
	if (ret < 0) {
netfilter: use kvmalloc_array to allocate memory for hashtable
nf_ct_alloc_hashtable is used to allocate memory for the conntrack,
NAT bysource and expectation hashtables. Assuming a 64k bucket size,
which means a 7th-order page allocation, __get_free_pages (called by
nf_ct_alloc_hashtable) will trigger direct memory reclaim and stall
for a long time when the system is under memory pressure.
So replace the combination of __get_free_pages and vzalloc with
kvmalloc_array, which provides an overflow check and a fallback if no
high-order memory is available, and which does not retry reclaim,
reducing the stall. Also remove nf_ct_free_hashtable, since it is now
just a kvfree.
(A minimal sketch of this allocation pattern follows nf_nat_init() below.)
Signed-off-by: Zhang Yu <zhangyu31@baidu.com>
Signed-off-by: Wang Li <wangli39@baidu.com>
Signed-off-by: Li RongQing <lirongqing@baidu.com>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
		kvfree(nf_nat_bysource);
		pr_err("Unable to register extension\n");
		return ret;
	}

	for (i = 0; i < CONNTRACK_LOCKS; i++)
		spin_lock_init(&nf_nat_locks[i]);

	ret = register_pernet_subsys(&nat_net_ops);
	if (ret < 0) {
		nf_ct_extend_unregister(&nat_extend);
		return ret;
	}

	nf_ct_helper_expectfn_register(&follow_master_nat);

	WARN_ON(nf_nat_hook != NULL);
	RCU_INIT_POINTER(nf_nat_hook, &nat_hook);

	return 0;
}
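The allocation pattern described in the kvmalloc_array changelog note above, as a minimal sketch. This is a fragment with hypothetical helper names (example_alloc_buckets, example_free_buckets) in an assumed kernel context; it is not the actual nf_ct_alloc_hashtable implementation.

#include <linux/mm.h>		/* kvmalloc_array(), kvfree() */
#include <linux/list_nulls.h>	/* struct hlist_nulls_head */

/* Hypothetical helper: allocate nr_buckets zeroed hash-list heads.
 * kvmalloc_array() checks for multiplication overflow, tries a
 * physically contiguous allocation first and falls back to vmalloc
 * instead of stalling in direct reclaim for high-order pages. */
static struct hlist_nulls_head *example_alloc_buckets(unsigned int nr_buckets)
{
	return kvmalloc_array(nr_buckets, sizeof(struct hlist_nulls_head),
			      GFP_KERNEL | __GFP_ZERO);
}

/* kvfree() handles both kmalloc- and vmalloc-backed memory, which is
 * why a dedicated free helper for the hashtable is no longer needed. */
static void example_free_buckets(struct hlist_nulls_head *hash)
{
	kvfree(hash);
}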
static void __exit nf_nat_cleanup(void)
{
	struct nf_nat_proto_clean clean = {};

	nf_ct_iterate_destroy(nf_nat_proto_clean, &clean);

	nf_ct_extend_unregister(&nat_extend);
	nf_ct_helper_expectfn_unregister(&follow_master_nat);
	RCU_INIT_POINTER(nf_nat_hook, NULL);

	synchronize_net();
	kvfree(nf_nat_bysource);
	unregister_pernet_subsys(&nat_net_ops);
}
MODULE_LICENSE("GPL");

module_init(nf_nat_init);
module_exit(nf_nat_cleanup);