Commit a86888b9 authored by Patrick McHardy, committed by David S. Miller

[NETFILTER]: Fix multiple problems with the conntrack event cache



refcnt underflow: the reference count is decremented when a conntrack
entry is removed from the hash, but it is never incremented when new
entries are added.
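
The fix makes the get/put pairing explicit: the per-CPU cache takes a
reference when it adopts a conntrack (__ip_ct_event_cache_init) and drops
it when the cached events are delivered (__ip_ct_deliver_cached_events).
In outline, excerpted from the diff below:

        /* cache adopts a conntrack: take a reference */
        ecache->ct = ct;
        nf_conntrack_get(&ct->ct_general);

        /* events delivered, cache entry cleared: drop the reference */
        ip_conntrack_put(ecache->ct);
        ecache->ct = NULL;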

missing protection of process context against softirq context: all
cache operations need to locally disable softirqs to avoid races.
Additionally, the event cache can't be initialized when a packet
enters the conntrack code; it needs to be initialized whenever we
cache an event and the stored conntrack entry doesn't match the
current one.
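
Concretely, every cache access is now bracketed by local_bh_disable()/
local_bh_enable(), and the cache re-initializes itself on a conntrack
mismatch, as in the new ip_conntrack_event_cache() in the diff below:

        local_bh_disable();     /* keep softirqs off the per-CPU cache */
        ecache = &__get_cpu_var(ip_conntrack_ecache);
        if (ct != ecache->ct)
                __ip_ct_event_cache_init(ct);   /* deliver old events, adopt ct */
        ecache->events |= event;
        local_bh_enable();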

incorrect flushing of the event cache in ip_ct_iterate_cleanup:
without real locking we can't flush the caches of other CPUs without
incurring races. The caches of other CPUs can only be flushed when no
packets are going through the code. ip_ct_iterate_cleanup doesn't need
to drop all references anyway, so flushing is moved to the cleanup path
(ip_conntrack_flush).
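
The resulting ordering in ip_conntrack_flush() below: packets are
drained first, then the other CPUs' cache entries are dropped:

        synchronize_net();              /* wait for packets in flight */
        ip_ct_event_cache_flush();      /* now safe to touch other CPUs' caches */
        ip_ct_iterate_cleanup(kill_all, NULL);
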
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent a55ebcc4
@@ -411,6 +411,7 @@ struct ip_conntrack_stat
 
 #ifdef CONFIG_IP_NF_CONNTRACK_EVENTS
 #include <linux/notifier.h>
+#include <linux/interrupt.h>
 
 struct ip_conntrack_ecache {
        struct ip_conntrack *ct;
@@ -445,26 +446,24 @@ ip_conntrack_expect_unregister_notifier(struct notifier_block *nb)
        return notifier_chain_unregister(&ip_conntrack_expect_chain, nb);
 }
 
+extern void ip_ct_deliver_cached_events(const struct ip_conntrack *ct);
+extern void __ip_ct_event_cache_init(struct ip_conntrack *ct);
+
 static inline void
 ip_conntrack_event_cache(enum ip_conntrack_events event,
                         const struct sk_buff *skb)
 {
-       struct ip_conntrack_ecache *ecache =
-                               &__get_cpu_var(ip_conntrack_ecache);
-
-       if (unlikely((struct ip_conntrack *) skb->nfct != ecache->ct)) {
-               if (net_ratelimit()) {
-                       printk(KERN_ERR "ctevent: skb->ct != ecache->ct !!!\n");
-                       dump_stack();
-               }
-       }
+       struct ip_conntrack *ct = (struct ip_conntrack *)skb->nfct;
+       struct ip_conntrack_ecache *ecache;
+
+       local_bh_disable();
+       ecache = &__get_cpu_var(ip_conntrack_ecache);
+       if (ct != ecache->ct)
+               __ip_ct_event_cache_init(ct);
        ecache->events |= event;
+       local_bh_enable();
 }
 
-extern void
-ip_conntrack_deliver_cached_events_for(const struct ip_conntrack *ct);
-extern void ip_conntrack_event_cache_init(const struct sk_buff *skb);
-
 static inline void ip_conntrack_event(enum ip_conntrack_events event,
                                      struct ip_conntrack *ct)
 {
@@ -483,9 +482,7 @@ static inline void ip_conntrack_event_cache(enum ip_conntrack_events event,
                                            const struct sk_buff *skb) {}
 static inline void ip_conntrack_event(enum ip_conntrack_events event,
                                      struct ip_conntrack *ct) {}
-static inline void ip_conntrack_deliver_cached_events_for(
-                                               struct ip_conntrack *ct) {}
-static inline void ip_conntrack_event_cache_init(const struct sk_buff *skb) {}
+static inline void ip_ct_deliver_cached_events(const struct ip_conntrack *ct) {}
 static inline void
 ip_conntrack_expect_event(enum ip_conntrack_expect_events event,
                          struct ip_conntrack_expect *exp) {}
......
@@ -44,18 +44,14 @@ static inline int ip_conntrack_confirm(struct sk_buff **pskb)
        struct ip_conntrack *ct = (struct ip_conntrack *)(*pskb)->nfct;
        int ret = NF_ACCEPT;
 
-       if (ct && !is_confirmed(ct))
-               ret = __ip_conntrack_confirm(pskb);
-
-       ip_conntrack_deliver_cached_events_for(ct);
-
+       if (ct) {
+               if (!is_confirmed(ct))
+                       ret = __ip_conntrack_confirm(pskb);
+               ip_ct_deliver_cached_events(ct);
+       }
        return ret;
 }
 
-#ifdef CONFIG_IP_NF_CONNTRACK_EVENTS
-struct ip_conntrack_ecache;
-extern void __ip_ct_deliver_cached_events(struct ip_conntrack_ecache *ec);
-#endif
-
 extern void __ip_ct_expect_unlink_destroy(struct ip_conntrack_expect *exp);
 
 extern struct list_head *ip_conntrack_hash;
......
@@ -85,73 +85,62 @@ struct notifier_block *ip_conntrack_expect_chain;
 
 DEFINE_PER_CPU(struct ip_conntrack_ecache, ip_conntrack_ecache);
 
-static inline void __deliver_cached_events(struct ip_conntrack_ecache *ecache)
+/* deliver cached events and clear cache entry - must be called with locally
+ * disabled softirqs */
+static inline void
+__ip_ct_deliver_cached_events(struct ip_conntrack_ecache *ecache)
 {
+       DEBUGP("ecache: delivering events for %p\n", ecache->ct);
        if (is_confirmed(ecache->ct) && !is_dying(ecache->ct) && ecache->events)
                notifier_call_chain(&ip_conntrack_chain, ecache->events,
                                    ecache->ct);
        ecache->events = 0;
-}
-
-void __ip_ct_deliver_cached_events(struct ip_conntrack_ecache *ecache)
-{
-       __deliver_cached_events(ecache);
+       ip_conntrack_put(ecache->ct);
+       ecache->ct = NULL;
 }
 
 /* Deliver all cached events for a particular conntrack. This is called
  * by code prior to async packet handling or freeing the skb */
-void
-ip_conntrack_deliver_cached_events_for(const struct ip_conntrack *ct)
+void ip_ct_deliver_cached_events(const struct ip_conntrack *ct)
 {
-       struct ip_conntrack_ecache *ecache =
-                               &__get_cpu_var(ip_conntrack_ecache);
-
-       if (!ct)
-               return;
-
-       if (ecache->ct == ct) {
-               DEBUGP("ecache: delivering event for %p\n", ct);
-               __deliver_cached_events(ecache);
-       } else {
-               if (net_ratelimit())
-                       printk(KERN_WARNING "ecache: want to deliver for %p, "
-                              "but cache has %p\n", ct, ecache->ct);
-       }
-
-       /* signalize that events have already been delivered */
-       ecache->ct = NULL;
+       struct ip_conntrack_ecache *ecache;
+
+       local_bh_disable();
+       ecache = &__get_cpu_var(ip_conntrack_ecache);
+       if (ecache->ct == ct)
+               __ip_ct_deliver_cached_events(ecache);
+       local_bh_enable();
 }
 
-/* Deliver cached events for old pending events, if current conntrack != old */
-void ip_conntrack_event_cache_init(const struct sk_buff *skb)
+void __ip_ct_event_cache_init(struct ip_conntrack *ct)
 {
-       struct ip_conntrack *ct = (struct ip_conntrack *) skb->nfct;
-       struct ip_conntrack_ecache *ecache =
-                               &__get_cpu_var(ip_conntrack_ecache);
+       struct ip_conntrack_ecache *ecache;
 
        /* take care of delivering potentially old events */
-       if (ecache->ct != ct) {
-               enum ip_conntrack_info ctinfo;
-               /* we have to check, since at startup the cache is NULL */
-               if (likely(ecache->ct)) {
-                       DEBUGP("ecache: entered for different conntrack: "
-                              "ecache->ct=%p, skb->nfct=%p. delivering "
-                              "events\n", ecache->ct, ct);
-                       __deliver_cached_events(ecache);
-               } else {
-                       DEBUGP("ecache: entered for conntrack %p, "
-                              "cache was clean before\n", ct);
-               }
-
-               /* initialize for this conntrack/packet */
-               ecache->ct = ip_conntrack_get(skb, &ctinfo);
-               /* ecache->events cleared by __deliver_cached_events() */
-       } else {
-               DEBUGP("ecache: re-entered for conntrack %p.\n", ct);
-       }
+       ecache = &__get_cpu_var(ip_conntrack_ecache);
+       BUG_ON(ecache->ct == ct);
+       if (ecache->ct)
+               __ip_ct_deliver_cached_events(ecache);
+       /* initialize for this conntrack/packet */
+       ecache->ct = ct;
+       nf_conntrack_get(&ct->ct_general);
 }
 
+/* flush the event cache - touches other CPU's data and must not be called
+ * while packets are still passing through the code */
+static void ip_ct_event_cache_flush(void)
+{
+       struct ip_conntrack_ecache *ecache;
+       int cpu;
+
+       for_each_cpu(cpu) {
+               ecache = &per_cpu(ip_conntrack_ecache, cpu);
+               if (ecache->ct)
+                       ip_conntrack_put(ecache->ct);
+       }
+}
+#else
+static inline void ip_ct_event_cache_flush(void) {}
 #endif /* CONFIG_IP_NF_CONNTRACK_EVENTS */
 
 DEFINE_PER_CPU(struct ip_conntrack_stat, ip_conntrack_stat);
@@ -878,8 +867,6 @@ unsigned int ip_conntrack_in(unsigned int hooknum,
 
        IP_NF_ASSERT((*pskb)->nfct);
 
-       ip_conntrack_event_cache_init(*pskb);
-
        ret = proto->packet(ct, *pskb, ctinfo);
 
        if (ret < 0) {
                /* Invalid: inverse of the return code tells
@@ -1278,23 +1265,6 @@ ip_ct_iterate_cleanup(int (*iter)(struct ip_conntrack *i, void *), void *data)
                ip_conntrack_put(ct);
        }
-
-#ifdef CONFIG_IP_NF_CONNTRACK_EVENTS
-       {
-               /* we need to deliver all cached events in order to drop
-                * the reference counts */
-               int cpu;
-
-               for_each_cpu(cpu) {
-                       struct ip_conntrack_ecache *ecache =
-                                       &per_cpu(ip_conntrack_ecache, cpu);
-                       if (ecache->ct) {
-                               __ip_ct_deliver_cached_events(ecache);
-                               ip_conntrack_put(ecache->ct);
-                               ecache->ct = NULL;
-                       }
-               }
-       }
-#endif
 }
 
 /* Fast function for those who don't want to parse /proc (and I don't
@@ -1381,6 +1351,7 @@ void ip_conntrack_flush()
           delete... */
        synchronize_net();
 
+       ip_ct_event_cache_flush();
 i_see_dead_people:
        ip_ct_iterate_cleanup(kill_all, NULL);
        if (atomic_read(&ip_conntrack_count) != 0) {
......
@@ -401,7 +401,6 @@ static unsigned int ip_confirm(unsigned int hooknum,
                               const struct net_device *out,
                               int (*okfn)(struct sk_buff *))
 {
-       ip_conntrack_event_cache_init(*pskb);
        /* We've seen it coming out the other side: confirm it */
        return ip_conntrack_confirm(pskb);
 }
@@ -419,7 +418,6 @@ static unsigned int ip_conntrack_help(unsigned int hooknum,
        ct = ip_conntrack_get(*pskb, &ctinfo);
        if (ct && ct->helper) {
                unsigned int ret;
-               ip_conntrack_event_cache_init(*pskb);
                ret = ct->helper->help(pskb, ct, ctinfo);
                if (ret != NF_ACCEPT)
                        return ret;
@@ -978,6 +976,7 @@ EXPORT_SYMBOL_GPL(ip_conntrack_chain);
 EXPORT_SYMBOL_GPL(ip_conntrack_expect_chain);
 EXPORT_SYMBOL_GPL(ip_conntrack_register_notifier);
 EXPORT_SYMBOL_GPL(ip_conntrack_unregister_notifier);
+EXPORT_SYMBOL_GPL(__ip_ct_event_cache_init);
 EXPORT_PER_CPU_SYMBOL_GPL(ip_conntrack_ecache);
 #endif
 EXPORT_SYMBOL(ip_conntrack_protocol_register);
......