/* Expectation handling for nf_conntrack. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 * (c) 2005-2012 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/moduleparam.h>
#include <linux/export.h>
#include <net/net_namespace.h>
#include <net/netns/hash.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_zones.h>

unsigned int nf_ct_expect_hsize __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hsize);

struct hlist_head *nf_ct_expect_hash __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hash);

unsigned int nf_ct_expect_max __read_mostly;

static struct kmem_cache *nf_ct_expect_cachep __read_mostly;
static unsigned int nf_ct_expect_hashrnd __read_mostly;

/* nf_conntrack_expect helper functions */
void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
				u32 portid, int report)
{
	struct nf_conn_help *master_help = nfct_help(exp->master);
	struct net *net = nf_ct_exp_net(exp);

	NF_CT_ASSERT(master_help);
	NF_CT_ASSERT(!timer_pending(&exp->timeout));

	hlist_del_rcu(&exp->hnode);
	net->ct.expect_count--;

	hlist_del(&exp->lnode);
	master_help->expecting[exp->class]--;

	nf_ct_expect_event_report(IPEXP_DESTROY, exp, portid, report);
	nf_ct_expect_put(exp);

	NF_CT_STAT_INC(net, expect_delete);
}
EXPORT_SYMBOL_GPL(nf_ct_unlink_expect_report);

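/* Timer callback: the expectation expired without being used, so unlink
 * it under the expectation lock and drop the timer's reference.
 */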
static void nf_ct_expectation_timed_out(unsigned long ul_expect)
{
	struct nf_conntrack_expect *exp = (void *)ul_expect;

	spin_lock_bh(&nf_conntrack_expect_lock);
	nf_ct_unlink_expect(exp);
	spin_unlock_bh(&nf_conntrack_expect_lock);
	nf_ct_expect_put(exp);
}

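/* Hash an expectation by its destination tuple (address, layer 3/4
 * protocol and destination port), mixed with a boot-time random seed
 * and the netns hash, then scaled to the expectation table size.
 */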
static unsigned int nf_ct_expect_dst_hash(const struct net *n, const struct nf_conntrack_tuple *tuple)
{
	unsigned int hash, seed;

	get_random_once(&nf_ct_expect_hashrnd, sizeof(nf_ct_expect_hashrnd));

	seed = nf_ct_expect_hashrnd ^ net_hash_mix(n);

	hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
		      (((tuple->dst.protonum ^ tuple->src.l3num) << 16) |
		       (__force __u16)tuple->dst.u.all) ^ seed);

	return reciprocal_scale(hash, nf_ct_expect_hsize);
}

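/* Does expectation @i match @tuple, taking the expectation's mask, the
 * network namespace and the conntrack zone into account?
 */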
static bool
nf_ct_exp_equal(const struct nf_conntrack_tuple *tuple,
		const struct nf_conntrack_expect *i,
		const struct nf_conntrack_zone *zone,
		const struct net *net)
{
	return nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
	       net_eq(net, nf_ct_net(i->master)) &&
	       nf_ct_zone_equal_any(i->master, zone);
}

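/* Look up an expectation by tuple within a netns and zone.  The caller
 * must hold rcu_read_lock(); no reference on the result is taken.
 */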
struct nf_conntrack_expect *
__nf_ct_expect_find(struct net *net,
		    const struct nf_conntrack_zone *zone,
		    const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i;
	unsigned int h;

	if (!net->ct.expect_count)
		return NULL;

	h = nf_ct_expect_dst_hash(net, tuple);
	hlist_for_each_entry_rcu(i, &nf_ct_expect_hash[h], hnode) {
		if (nf_ct_exp_equal(tuple, i, zone, net))
			return i;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(__nf_ct_expect_find);

/* Just find an expectation corresponding to a tuple. */
struct nf_conntrack_expect *
nf_ct_expect_find_get(struct net *net,
		      const struct nf_conntrack_zone *zone,
		      const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i;

	rcu_read_lock();
	i = __nf_ct_expect_find(net, zone, tuple);
	if (i && !atomic_inc_not_zero(&i->use))
		i = NULL;
	rcu_read_unlock();

	return i;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_find_get);

/* If an expectation for this connection is found, it gets deleted from
 * the global list and then returned. */
struct nf_conntrack_expect *
nf_ct_find_expectation(struct net *net,
		       const struct nf_conntrack_zone *zone,
		       const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i, *exp = NULL;
	unsigned int h;

	if (!net->ct.expect_count)
		return NULL;

	h = nf_ct_expect_dst_hash(net, tuple);
	hlist_for_each_entry(i, &nf_ct_expect_hash[h], hnode) {
		if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
		    nf_ct_exp_equal(tuple, i, zone, net)) {
			exp = i;
			break;
		}
	}
	if (!exp)
		return NULL;

	/* If master is not in hash table yet (ie. packet hasn't left
	   this machine yet), how can other end know about expected?
	   Hence these are not the droids you are looking for (if
	   master ct never got confirmed, we'd hold a reference to it
	   and weird things would happen to future packets). */
	if (!nf_ct_is_confirmed(exp->master))
		return NULL;

	/* Avoid race with other CPUs, that for exp->master ct, is
	 * about to invoke ->destroy(), or nf_ct_delete() via timeout
	 * or early_drop().
	 *
	 * The atomic_inc_not_zero() check tells:  If that fails, we
	 * know that the ct is being destroyed.  If it succeeds, we
	 * can be sure the ct cannot disappear underneath.
	 */
	if (unlikely(nf_ct_is_dying(exp->master) ||
		     !atomic_inc_not_zero(&exp->master->ct_general.use)))
		return NULL;

	if (exp->flags & NF_CT_EXPECT_PERMANENT) {
		atomic_inc(&exp->use);
		return exp;
	} else if (del_timer(&exp->timeout)) {
		nf_ct_unlink_expect(exp);
		return exp;
	}
	/* Undo exp->master refcnt increase, if del_timer() failed */
	nf_ct_put(exp->master);

	return NULL;
}

/* delete all expectations for this conntrack */
void nf_ct_remove_expectations(struct nf_conn *ct)
{
	struct nf_conn_help *help = nfct_help(ct);
	struct nf_conntrack_expect *exp;
	struct hlist_node *next;

	/* Optimization: most connections never expect any others. */
	if (!help)
		return;

	spin_lock_bh(&nf_conntrack_expect_lock);
	hlist_for_each_entry_safe(exp, next, &help->expectations, lnode) {
		if (del_timer(&exp->timeout)) {
			nf_ct_unlink_expect(exp);
			nf_ct_expect_put(exp);
		}
	}
	spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_remove_expectations);

/* Would two expected things clash? */
static inline int expect_clash(const struct nf_conntrack_expect *a,
			       const struct nf_conntrack_expect *b)
{
	/* Part covered by intersection of masks must be unequal,
	   otherwise they clash */
	struct nf_conntrack_tuple_mask intersect_mask;
	int count;

	intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all;

	for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++) {
		intersect_mask.src.u3.all[count] =
			a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
	}

	return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask) &&
	       net_eq(nf_ct_net(a->master), nf_ct_net(b->master)) &&
	       nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
}

static inline int expect_matches(const struct nf_conntrack_expect *a,
				 const struct nf_conntrack_expect *b)
{
	return a->master == b->master && a->class == b->class &&
	       nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
	       nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
	       net_eq(nf_ct_net(a->master), nf_ct_net(b->master)) &&
	       nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
}

/* Generally a bad idea to call this: could have matched already. */
void nf_ct_unexpect_related(struct nf_conntrack_expect *exp)
{
	spin_lock_bh(&nf_conntrack_expect_lock);
	if (del_timer(&exp->timeout)) {
		nf_ct_unlink_expect(exp);
		nf_ct_expect_put(exp);
	}
	spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_unexpect_related);

/* We don't increase the master conntrack refcount for unfulfilled
 * expectations. During conntrack destruction, the expectations are
 * always killed before the conntrack itself. */
struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me)
{
	struct nf_conntrack_expect *new;

	new = kmem_cache_alloc(nf_ct_expect_cachep, GFP_ATOMIC);
	if (!new)
		return NULL;

	new->master = me;
	atomic_set(&new->use, 1);
	return new;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_alloc);

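/* Fill in an expectation allocated by nf_ct_expect_alloc(): class,
 * address family, layer 4 protocol and the expected tuple.  A NULL
 * source address or source port is treated as a wildcard (zero mask).
 */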
void nf_ct_expect_init(struct nf_conntrack_expect *exp, unsigned int class,
		       u_int8_t family,
		       const union nf_inet_addr *saddr,
		       const union nf_inet_addr *daddr,
		       u_int8_t proto, const __be16 *src, const __be16 *dst)
{
	int len;

	if (family == AF_INET)
		len = 4;
	else
		len = 16;

	exp->flags = 0;
	exp->class = class;
	exp->expectfn = NULL;
	exp->helper = NULL;
	exp->tuple.src.l3num = family;
	exp->tuple.dst.protonum = proto;

	if (saddr) {
		memcpy(&exp->tuple.src.u3, saddr, len);
		if (sizeof(exp->tuple.src.u3) > len)
			/* address needs to be cleared for nf_ct_tuple_equal */
			memset((void *)&exp->tuple.src.u3 + len, 0x00,
			       sizeof(exp->tuple.src.u3) - len);
		memset(&exp->mask.src.u3, 0xFF, len);
		if (sizeof(exp->mask.src.u3) > len)
			memset((void *)&exp->mask.src.u3 + len, 0x00,
			       sizeof(exp->mask.src.u3) - len);
	} else {
		memset(&exp->tuple.src.u3, 0x00, sizeof(exp->tuple.src.u3));
		memset(&exp->mask.src.u3, 0x00, sizeof(exp->mask.src.u3));
	}

	if (src) {
		exp->tuple.src.u.all = *src;
		exp->mask.src.u.all = htons(0xFFFF);
	} else {
		exp->tuple.src.u.all = 0;
		exp->mask.src.u.all = 0;
	}

	memcpy(&exp->tuple.dst.u3, daddr, len);
	if (sizeof(exp->tuple.dst.u3) > len)
		/* address needs to be cleared for nf_ct_tuple_equal */
		memset((void *)&exp->tuple.dst.u3 + len, 0x00,
		       sizeof(exp->tuple.dst.u3) - len);

	exp->tuple.dst.u.all = *dst;

#ifdef CONFIG_NF_NAT_NEEDED
	memset(&exp->saved_addr, 0, sizeof(exp->saved_addr));
	memset(&exp->saved_proto, 0, sizeof(exp->saved_proto));
#endif
}
EXPORT_SYMBOL_GPL(nf_ct_expect_init);

static void nf_ct_expect_free_rcu(struct rcu_head *head)
{
	struct nf_conntrack_expect *exp;

	exp = container_of(head, struct nf_conntrack_expect, rcu);
	kmem_cache_free(nf_ct_expect_cachep, exp);
}

void nf_ct_expect_put(struct nf_conntrack_expect *exp)
{
	if (atomic_dec_and_test(&exp->use))
		call_rcu(&exp->rcu, nf_ct_expect_free_rcu);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_put);

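/* Link a checked expectation into the per-helper list and the global
 * expectation hash, taking two references (one for the hash insert,
 * one for the timer), and arm its timeout (taken from the helper's
 * expect policy when a helper is attached).
 */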
static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
{
	struct nf_conn_help *master_help = nfct_help(exp->master);
	struct nf_conntrack_helper *helper;
	struct net *net = nf_ct_exp_net(exp);
	unsigned int h = nf_ct_expect_dst_hash(net, &exp->tuple);

	/* two references: one for hash insert, one for the timer */
	atomic_add(2, &exp->use);

	hlist_add_head(&exp->lnode, &master_help->expectations);
	master_help->expecting[exp->class]++;

	hlist_add_head_rcu(&exp->hnode, &nf_ct_expect_hash[h]);
	net->ct.expect_count++;

	setup_timer(&exp->timeout, nf_ct_expectation_timed_out,
		    (unsigned long)exp);
	helper = rcu_dereference_protected(master_help->helper,
					   lockdep_is_held(&nf_conntrack_expect_lock));
	if (helper) {
		exp->timeout.expires = jiffies +
			helper->expect_policy[exp->class].timeout * HZ;
	}
	add_timer(&exp->timeout);

	NF_CT_STAT_INC(net, expect_create);
}

/* Race with expectations being used means we could have none to find; OK. */
static void evict_oldest_expect(struct nf_conn *master,
				struct nf_conntrack_expect *new)
{
	struct nf_conn_help *master_help = nfct_help(master);
	struct nf_conntrack_expect *exp, *last = NULL;

	hlist_for_each_entry(exp, &master_help->expectations, lnode) {
		if (exp->class == new->class)
			last = exp;
	}

	if (last && del_timer(&last->timeout)) {
		nf_ct_unlink_expect(last);
		nf_ct_expect_put(last);
	}
}

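/* Check whether @expect may be added: replace an identical existing
 * expectation, reject on a clash, and enforce the per-helper
 * max_expected policy as well as the global nf_ct_expect_max limit.
 */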
static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
{
	const struct nf_conntrack_expect_policy *p;
	struct nf_conntrack_expect *i;
	struct nf_conn *master = expect->master;
	struct nf_conn_help *master_help = nfct_help(master);
	struct nf_conntrack_helper *helper;
	struct net *net = nf_ct_exp_net(expect);
	struct hlist_node *next;
	unsigned int h;
	int ret = 1;

	if (!master_help) {
		ret = -ESHUTDOWN;
		goto out;
	}
	h = nf_ct_expect_dst_hash(net, &expect->tuple);
	hlist_for_each_entry_safe(i, next, &nf_ct_expect_hash[h], hnode) {
		if (expect_matches(i, expect)) {
			if (del_timer(&i->timeout)) {
				nf_ct_unlink_expect(i);
				nf_ct_expect_put(i);
				break;
			}
		} else if (expect_clash(i, expect)) {
			ret = -EBUSY;
			goto out;
		}
	}
	/* Will be over limit? */
	helper = rcu_dereference_protected(master_help->helper,
					   lockdep_is_held(&nf_conntrack_expect_lock));
	if (helper) {
		p = &helper->expect_policy[expect->class];
		if (p->max_expected &&
		    master_help->expecting[expect->class] >= p->max_expected) {
			evict_oldest_expect(master, expect);
			if (master_help->expecting[expect->class]
			    >= p->max_expected) {
				ret = -EMFILE;
				goto out;
			}
		}
	}

	if (net->ct.expect_count >= nf_ct_expect_max) {
		net_warn_ratelimited("nf_conntrack: expectation table full\n");
		ret = -EMFILE;
	}
out:
	return ret;
}

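/* Register an expectation: validate it under the expectation lock,
 * insert it into the table and report an IPEXP_NEW event on success.
 */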
int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
				u32 portid, int report)
{
	int ret;

	spin_lock_bh(&nf_conntrack_expect_lock);
	ret = __nf_ct_expect_check(expect);
	if (ret <= 0)
		goto out;

	nf_ct_expect_insert(expect);

	spin_unlock_bh(&nf_conntrack_expect_lock);
	nf_ct_expect_event_report(IPEXP_NEW, expect, portid, report);
	return 0;
out:
	spin_unlock_bh(&nf_conntrack_expect_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_related_report);

#ifdef CONFIG_NF_CONNTRACK_PROCFS
struct ct_expect_iter_state {
	struct seq_net_private p;
	unsigned int bucket;
};

static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
{
	struct ct_expect_iter_state *st = seq->private;
	struct hlist_node *n;

	for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
		n = rcu_dereference(hlist_first_rcu(&nf_ct_expect_hash[st->bucket]));
		if (n)
			return n;
	}
	return NULL;
}

static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
					     struct hlist_node *head)
{
	struct ct_expect_iter_state *st = seq->private;

	head = rcu_dereference(hlist_next_rcu(head));
	while (head == NULL) {
		if (++st->bucket >= nf_ct_expect_hsize)
			return NULL;
		head = rcu_dereference(hlist_first_rcu(&nf_ct_expect_hash[st->bucket]));
	}
	return head;
}

static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
{
	struct hlist_node *head = ct_expect_get_first(seq);

	if (head)
		while (pos && (head = ct_expect_get_next(seq, head)))
			pos--;
	return pos ? NULL : head;
}

static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return ct_expect_get_idx(seq, *pos);
}

static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	(*pos)++;
	return ct_expect_get_next(seq, v);
}

static void exp_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

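/* Emit one /proc/net/nf_conntrack_expect line: remaining timeout,
 * layer 3/4 protocol numbers, the expected tuple, flags and, if a
 * helper is attached, its name and the expectation class name.
 */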
static int exp_seq_show(struct seq_file *s, void *v)
{
	struct nf_conntrack_expect *expect;
	struct nf_conntrack_helper *helper;
	struct hlist_node *n = v;
	char *delim = "";

	expect = hlist_entry(n, struct nf_conntrack_expect, hnode);

	if (expect->timeout.function)
		seq_printf(s, "%ld ", timer_pending(&expect->timeout)
			   ? (long)(expect->timeout.expires - jiffies)/HZ : 0);
	else
		seq_printf(s, "- ");
	seq_printf(s, "l3proto = %u proto=%u ",
		   expect->tuple.src.l3num,
		   expect->tuple.dst.protonum);
	print_tuple(s, &expect->tuple,
		    __nf_ct_l3proto_find(expect->tuple.src.l3num),
		    __nf_ct_l4proto_find(expect->tuple.src.l3num,
					 expect->tuple.dst.protonum));

	if (expect->flags & NF_CT_EXPECT_PERMANENT) {
		seq_printf(s, "PERMANENT");
		delim = ",";
	}
	if (expect->flags & NF_CT_EXPECT_INACTIVE) {
		seq_printf(s, "%sINACTIVE", delim);
		delim = ",";
	}
	if (expect->flags & NF_CT_EXPECT_USERSPACE)
		seq_printf(s, "%sUSERSPACE", delim);

	helper = rcu_dereference(nfct_help(expect->master)->helper);
	if (helper) {
		seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name);
		if (helper->expect_policy[expect->class].name[0])
			seq_printf(s, "/%s",
				   helper->expect_policy[expect->class].name);
	}

	seq_putc(s, '\n');

	return 0;
}

static const struct seq_operations exp_seq_ops = {
	.start = exp_seq_start,
	.next = exp_seq_next,
	.stop = exp_seq_stop,
	.show = exp_seq_show
};

static int exp_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &exp_seq_ops,
			    sizeof(struct ct_expect_iter_state));
}

static const struct file_operations exp_file_ops = {
	.owner   = THIS_MODULE,
	.open    = exp_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};
#endif /* CONFIG_NF_CONNTRACK_PROCFS */

static int exp_proc_init(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_PROCFS
	struct proc_dir_entry *proc;
	kuid_t root_uid;
	kgid_t root_gid;

	proc = proc_create("nf_conntrack_expect", 0440, net->proc_net,
			   &exp_file_ops);
	if (!proc)
		return -ENOMEM;

	root_uid = make_kuid(net->user_ns, 0);
	root_gid = make_kgid(net->user_ns, 0);
	if (uid_valid(root_uid) && gid_valid(root_gid))
		proc_set_user(proc, root_uid, root_gid);
#endif /* CONFIG_NF_CONNTRACK_PROCFS */
	return 0;
}

static void exp_proc_remove(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_PROCFS
	remove_proc_entry("nf_conntrack_expect", net->proc_net);
#endif /* CONFIG_NF_CONNTRACK_PROCFS */
}

module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400);

int nf_conntrack_expect_pernet_init(struct net *net)
{
	net->ct.expect_count = 0;
	return exp_proc_init(net);
}

void nf_conntrack_expect_pernet_fini(struct net *net)
{
	exp_proc_remove(net);
}

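/* Module init: size the expectation hash table (from the
 * expect_hashsize parameter, or nf_conntrack_htable_size / 256 by
 * default), derive nf_ct_expect_max from it and allocate the slab
 * cache and the hash table.
 */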
int nf_conntrack_expect_init(void)
{
	if (!nf_ct_expect_hsize) {
		nf_ct_expect_hsize = nf_conntrack_htable_size / 256;
		if (!nf_ct_expect_hsize)
			nf_ct_expect_hsize = 1;
	}
	nf_ct_expect_max = nf_ct_expect_hsize * 4;
	nf_ct_expect_cachep = kmem_cache_create("nf_conntrack_expect",
					sizeof(struct nf_conntrack_expect),
					0, 0, NULL);
	if (!nf_ct_expect_cachep)
		return -ENOMEM;

	nf_ct_expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 0);
	if (!nf_ct_expect_hash) {
		kmem_cache_destroy(nf_ct_expect_cachep);
		return -ENOMEM;
	}

	return 0;
}

void nf_conntrack_expect_fini(void)
{
	rcu_barrier(); /* Wait for call_rcu() before destroy */
	kmem_cache_destroy(nf_ct_expect_cachep);
	nf_ct_free_hashtable(nf_ct_expect_hash, nf_ct_expect_hsize);
}