Commit e45b1be8 authored by Patrick McHardy, committed by David S. Miller

[NETFILTER]: Kill lockhelp.h


Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent c9e3e8b6
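
For orientation: lockhelp.h wrapped the kernel's standard locks in CONFIG_NETFILTER_DEBUG-only ownership tracking, and this commit deletes the header and converts every user back to the plain <linux/spinlock.h> primitives. The mapping, visible throughout the hunks below, is DECLARE_LOCK -> DEFINE_SPINLOCK, DECLARE_RWLOCK -> DEFINE_RWLOCK, LOCK_BH/UNLOCK_BH -> spin_lock_bh/spin_unlock_bh, the READ_/WRITE_ lock and unlock macros -> their *_bh counterparts, and the MUST_BE_*_LOCKED assertions either disappear or become empty ASSERT_*_LOCK macros. A minimal sketch of the pattern as applied to a conntrack helper; the lock and function names here are invented for illustration:

    #include <linux/spinlock.h>

    /* demo_buffer_lock and demo_help() are hypothetical names. */
    static DEFINE_SPINLOCK(demo_buffer_lock);  /* was: static DECLARE_LOCK(demo_buffer_lock); */

    static void demo_help(void)
    {
            spin_lock_bh(&demo_buffer_lock);   /* was: LOCK_BH(&demo_buffer_lock); */
            /* ... parse data shared across softirqs ... */
            spin_unlock_bh(&demo_buffer_lock); /* was: UNLOCK_BH(&demo_buffer_lock); */
    }
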
#ifndef _IP_CONNTRACK_CORE_H
#define _IP_CONNTRACK_CORE_H
#include <linux/netfilter.h>
-#include <linux/netfilter_ipv4/lockhelp.h>
/* This header is used to share core functionality between the
standalone connection tracking module, and the compatibility layer's use
@@ -47,6 +46,6 @@ static inline int ip_conntrack_confirm(struct sk_buff **pskb)
extern struct list_head *ip_conntrack_hash;
extern struct list_head ip_conntrack_expect_list;
-DECLARE_RWLOCK_EXTERN(ip_conntrack_lock);
+extern rwlock_t ip_conntrack_lock;
#endif /* _IP_CONNTRACK_CORE_H */
@@ -50,10 +50,9 @@ struct ip_nat_multi_range_compat
#ifdef __KERNEL__
#include <linux/list.h>
-#include <linux/netfilter_ipv4/lockhelp.h>
/* Protects NAT hash tables, and NAT-private part of conntracks. */
-DECLARE_RWLOCK_EXTERN(ip_nat_lock);
+extern rwlock_t ip_nat_lock;
/* The structure embedded in the conntrack structure. */
struct ip_nat_info
......
@@ -2,7 +2,6 @@
#define _LISTHELP_H
#include <linux/config.h>
#include <linux/list.h>
-#include <linux/netfilter_ipv4/lockhelp.h>
/* Header to do more comprehensive job than linux/list.h; assume list
is first entry in structure. */
......
-#ifndef _LOCKHELP_H
-#define _LOCKHELP_H
-#include <linux/config.h>
-#include <linux/spinlock.h>
-#include <asm/atomic.h>
-#include <linux/interrupt.h>
-#include <linux/smp.h>
-/* Header to do help in lock debugging. */
-#ifdef CONFIG_NETFILTER_DEBUG
-struct spinlock_debug
-{
-spinlock_t l;
-atomic_t locked_by;
-};
-struct rwlock_debug
-{
-rwlock_t l;
-long read_locked_map;
-long write_locked_map;
-};
-#define DECLARE_LOCK(l) \
-struct spinlock_debug l = { SPIN_LOCK_UNLOCKED, ATOMIC_INIT(-1) }
-#define DECLARE_LOCK_EXTERN(l) \
-extern struct spinlock_debug l
-#define DECLARE_RWLOCK(l) \
-struct rwlock_debug l = { RW_LOCK_UNLOCKED, 0, 0 }
-#define DECLARE_RWLOCK_EXTERN(l) \
-extern struct rwlock_debug l
-#define MUST_BE_LOCKED(l) \
-do { if (atomic_read(&(l)->locked_by) != smp_processor_id()) \
-printk("ASSERT %s:%u %s unlocked\n", __FILE__, __LINE__, #l); \
-} while(0)
-#define MUST_BE_UNLOCKED(l) \
-do { if (atomic_read(&(l)->locked_by) == smp_processor_id()) \
-printk("ASSERT %s:%u %s locked\n", __FILE__, __LINE__, #l); \
-} while(0)
-/* Write locked OK as well. */
-#define MUST_BE_READ_LOCKED(l) \
-do { if (!((l)->read_locked_map & (1UL << smp_processor_id())) \
-&& !((l)->write_locked_map & (1UL << smp_processor_id()))) \
-printk("ASSERT %s:%u %s not readlocked\n", __FILE__, __LINE__, #l); \
-} while(0)
-#define MUST_BE_WRITE_LOCKED(l) \
-do { if (!((l)->write_locked_map & (1UL << smp_processor_id()))) \
-printk("ASSERT %s:%u %s not writelocked\n", __FILE__, __LINE__, #l); \
-} while(0)
-#define MUST_BE_READ_WRITE_UNLOCKED(l) \
-do { if ((l)->read_locked_map & (1UL << smp_processor_id())) \
-printk("ASSERT %s:%u %s readlocked\n", __FILE__, __LINE__, #l); \
-else if ((l)->write_locked_map & (1UL << smp_processor_id())) \
-printk("ASSERT %s:%u %s writelocked\n", __FILE__, __LINE__, #l); \
-} while(0)
-#define LOCK_BH(lk) \
-do { \
-MUST_BE_UNLOCKED(lk); \
-spin_lock_bh(&(lk)->l); \
-atomic_set(&(lk)->locked_by, smp_processor_id()); \
-} while(0)
-#define UNLOCK_BH(lk) \
-do { \
-MUST_BE_LOCKED(lk); \
-atomic_set(&(lk)->locked_by, -1); \
-spin_unlock_bh(&(lk)->l); \
-} while(0)
-#define READ_LOCK(lk) \
-do { \
-MUST_BE_READ_WRITE_UNLOCKED(lk); \
-read_lock_bh(&(lk)->l); \
-set_bit(smp_processor_id(), &(lk)->read_locked_map); \
-} while(0)
-#define WRITE_LOCK(lk) \
-do { \
-MUST_BE_READ_WRITE_UNLOCKED(lk); \
-write_lock_bh(&(lk)->l); \
-set_bit(smp_processor_id(), &(lk)->write_locked_map); \
-} while(0)
-#define READ_UNLOCK(lk) \
-do { \
-if (!((lk)->read_locked_map & (1UL << smp_processor_id()))) \
-printk("ASSERT: %s:%u %s not readlocked\n", \
-__FILE__, __LINE__, #lk); \
-clear_bit(smp_processor_id(), &(lk)->read_locked_map); \
-read_unlock_bh(&(lk)->l); \
-} while(0)
-#define WRITE_UNLOCK(lk) \
-do { \
-MUST_BE_WRITE_LOCKED(lk); \
-clear_bit(smp_processor_id(), &(lk)->write_locked_map); \
-write_unlock_bh(&(lk)->l); \
-} while(0)
-#else
-#define DECLARE_LOCK(l) spinlock_t l = SPIN_LOCK_UNLOCKED
-#define DECLARE_LOCK_EXTERN(l) extern spinlock_t l
-#define DECLARE_RWLOCK(l) rwlock_t l = RW_LOCK_UNLOCKED
-#define DECLARE_RWLOCK_EXTERN(l) extern rwlock_t l
-#define MUST_BE_LOCKED(l)
-#define MUST_BE_UNLOCKED(l)
-#define MUST_BE_READ_LOCKED(l)
-#define MUST_BE_WRITE_LOCKED(l)
-#define MUST_BE_READ_WRITE_UNLOCKED(l)
-#define LOCK_BH(l) spin_lock_bh(l)
-#define UNLOCK_BH(l) spin_unlock_bh(l)
-#define READ_LOCK(l) read_lock_bh(l)
-#define WRITE_LOCK(l) write_lock_bh(l)
-#define READ_UNLOCK(l) read_unlock_bh(l)
-#define WRITE_UNLOCK(l) write_unlock_bh(l)
-#endif /*CONFIG_NETFILTER_DEBUG*/
-#endif /* _LOCKHELP_H */
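
For the record, what the deleted debug variants did: each wrapper recorded the owning CPU (an atomic_t for spinlocks, per-CPU bitmaps for rwlocks) and printk()ed an ASSERT line on misuse before touching the underlying lock. A hypothetical misuse that a CONFIG_NETFILTER_DEBUG build would have flagged; demo_lock is an invented name:

    static DECLARE_LOCK(demo_lock);

    static void buggy(void)
    {
            LOCK_BH(&demo_lock);
            LOCK_BH(&demo_lock); /* MUST_BE_UNLOCKED() sees locked_by == smp_processor_id()
                                  * and prints "ASSERT ... demo_lock locked" before the
                                  * second spin_lock_bh() self-deadlocks */
    }
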
@@ -60,7 +60,6 @@ static DECLARE_MUTEX(arpt_mutex);
#define ASSERT_READ_LOCK(x) ARP_NF_ASSERT(down_trylock(&arpt_mutex) != 0)
#define ASSERT_WRITE_LOCK(x) ARP_NF_ASSERT(down_trylock(&arpt_mutex) != 0)
-#include <linux/netfilter_ipv4/lockhelp.h>
#include <linux/netfilter_ipv4/listhelp.h>
struct arpt_table_info {
......
@@ -26,7 +26,6 @@
#include <net/checksum.h>
#include <net/udp.h>
-#include <linux/netfilter_ipv4/lockhelp.h>
#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
#include <linux/netfilter_ipv4/ip_conntrack_amanda.h>
@@ -42,7 +41,7 @@ static char *conns[] = { "DATA ", "MESG ", "INDEX " };
/* This is slow, but it's simple. --RR */
static char amanda_buffer[65536];
-static DECLARE_LOCK(amanda_buffer_lock);
+static DEFINE_SPINLOCK(amanda_buffer_lock);
unsigned int (*ip_nat_amanda_hook)(struct sk_buff **pskb,
enum ip_conntrack_info ctinfo,
@@ -76,7 +75,7 @@ static int help(struct sk_buff **pskb,
return NF_ACCEPT;
}
-LOCK_BH(&amanda_buffer_lock);
+spin_lock_bh(&amanda_buffer_lock);
skb_copy_bits(*pskb, dataoff, amanda_buffer, (*pskb)->len - dataoff);
data = amanda_buffer;
data_limit = amanda_buffer + (*pskb)->len - dataoff;
@@ -134,7 +133,7 @@ static int help(struct sk_buff **pskb,
}
out:
-UNLOCK_BH(&amanda_buffer_lock);
+spin_unlock_bh(&amanda_buffer_lock);
return ret;
}
......
@@ -38,10 +38,10 @@
#include <linux/percpu.h>
#include <linux/moduleparam.h>
-/* This rwlock protects the main hash table, protocol/helper/expected
+/* ip_conntrack_lock protects the main hash table, protocol/helper/expected
registrations, conntrack timers*/
-#define ASSERT_READ_LOCK(x) MUST_BE_READ_LOCKED(&ip_conntrack_lock)
-#define ASSERT_WRITE_LOCK(x) MUST_BE_WRITE_LOCKED(&ip_conntrack_lock)
+#define ASSERT_READ_LOCK(x)
+#define ASSERT_WRITE_LOCK(x)
#include <linux/netfilter_ipv4/ip_conntrack.h>
#include <linux/netfilter_ipv4/ip_conntrack_protocol.h>
@@ -57,7 +57,7 @@
#define DEBUGP(format, args...)
#endif
-DECLARE_RWLOCK(ip_conntrack_lock);
+DEFINE_RWLOCK(ip_conntrack_lock);
/* ip_conntrack_standalone needs this */
atomic_t ip_conntrack_count = ATOMIC_INIT(0);
@@ -147,7 +147,7 @@ static void destroy_expect(struct ip_conntrack_expect *exp)
static void unlink_expect(struct ip_conntrack_expect *exp)
{
-MUST_BE_WRITE_LOCKED(&ip_conntrack_lock);
+ASSERT_WRITE_LOCK(&ip_conntrack_lock);
list_del(&exp->list);
/* Logically in destroy_expect, but we hold the lock here. */
exp->master->expecting--;
@@ -157,9 +157,9 @@ static void expectation_timed_out(unsigned long ul_expect)
{
struct ip_conntrack_expect *exp = (void *)ul_expect;
-WRITE_LOCK(&ip_conntrack_lock);
+write_lock_bh(&ip_conntrack_lock);
unlink_expect(exp);
-WRITE_UNLOCK(&ip_conntrack_lock);
+write_unlock_bh(&ip_conntrack_lock);
destroy_expect(exp);
}
@@ -209,7 +209,7 @@ clean_from_lists(struct ip_conntrack *ct)
unsigned int ho, hr;
DEBUGP("clean_from_lists(%p)\n", ct);
-MUST_BE_WRITE_LOCKED(&ip_conntrack_lock);
+ASSERT_WRITE_LOCK(&ip_conntrack_lock);
ho = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
hr = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
@@ -240,7 +240,7 @@ destroy_conntrack(struct nf_conntrack *nfct)
if (ip_conntrack_destroyed)
ip_conntrack_destroyed(ct);
-WRITE_LOCK(&ip_conntrack_lock);
+write_lock_bh(&ip_conntrack_lock);
/* Expectations will have been removed in clean_from_lists,
* except TFTP can create an expectation on the first packet,
* before connection is in the list, so we need to clean here,
@@ -254,7 +254,7 @@ destroy_conntrack(struct nf_conntrack *nfct)
}
CONNTRACK_STAT_INC(delete);
-WRITE_UNLOCK(&ip_conntrack_lock);
+write_unlock_bh(&ip_conntrack_lock);
if (ct->master)
ip_conntrack_put(ct->master);
@@ -268,12 +268,12 @@ static void death_by_timeout(unsigned long ul_conntrack)
{
struct ip_conntrack *ct = (void *)ul_conntrack;
-WRITE_LOCK(&ip_conntrack_lock);
+write_lock_bh(&ip_conntrack_lock);
/* Inside lock so preempt is disabled on module removal path.
* Otherwise we can get spurious warnings. */
CONNTRACK_STAT_INC(delete_list);
clean_from_lists(ct);
-WRITE_UNLOCK(&ip_conntrack_lock);
+write_unlock_bh(&ip_conntrack_lock);
ip_conntrack_put(ct);
}
@@ -282,7 +282,7 @@ conntrack_tuple_cmp(const struct ip_conntrack_tuple_hash *i,
const struct ip_conntrack_tuple *tuple,
const struct ip_conntrack *ignored_conntrack)
{
-MUST_BE_READ_LOCKED(&ip_conntrack_lock);
+ASSERT_READ_LOCK(&ip_conntrack_lock);
return tuplehash_to_ctrack(i) != ignored_conntrack
&& ip_ct_tuple_equal(tuple, &i->tuple);
}
@@ -294,7 +294,7 @@ __ip_conntrack_find(const struct ip_conntrack_tuple *tuple,
struct ip_conntrack_tuple_hash *h;
unsigned int hash = hash_conntrack(tuple);
-MUST_BE_READ_LOCKED(&ip_conntrack_lock);
+ASSERT_READ_LOCK(&ip_conntrack_lock);
list_for_each_entry(h, &ip_conntrack_hash[hash], list) {
if (conntrack_tuple_cmp(h, tuple, ignored_conntrack)) {
CONNTRACK_STAT_INC(found);
@@ -313,11 +313,11 @@ ip_conntrack_find_get(const struct ip_conntrack_tuple *tuple,
{
struct ip_conntrack_tuple_hash *h;
-READ_LOCK(&ip_conntrack_lock);
+read_lock_bh(&ip_conntrack_lock);
h = __ip_conntrack_find(tuple, ignored_conntrack);
if (h)
atomic_inc(&tuplehash_to_ctrack(h)->ct_general.use);
-READ_UNLOCK(&ip_conntrack_lock);
+read_unlock_bh(&ip_conntrack_lock);
return h;
}
@@ -352,7 +352,7 @@ __ip_conntrack_confirm(struct sk_buff **pskb)
IP_NF_ASSERT(!is_confirmed(ct));
DEBUGP("Confirming conntrack %p\n", ct);
-WRITE_LOCK(&ip_conntrack_lock);
+write_lock_bh(&ip_conntrack_lock);
/* See if there's one in the list already, including reverse:
NAT could have grabbed it without realizing, since we're
@@ -380,12 +380,12 @@ __ip_conntrack_confirm(struct sk_buff **pskb)
atomic_inc(&ct->ct_general.use);
set_bit(IPS_CONFIRMED_BIT, &ct->status);
CONNTRACK_STAT_INC(insert);
-WRITE_UNLOCK(&ip_conntrack_lock);
+write_unlock_bh(&ip_conntrack_lock);
return NF_ACCEPT;
}
CONNTRACK_STAT_INC(insert_failed);
-WRITE_UNLOCK(&ip_conntrack_lock);
+write_unlock_bh(&ip_conntrack_lock);
return NF_DROP;
}
@@ -398,9 +398,9 @@ ip_conntrack_tuple_taken(const struct ip_conntrack_tuple *tuple,
{
struct ip_conntrack_tuple_hash *h;
-READ_LOCK(&ip_conntrack_lock);
+read_lock_bh(&ip_conntrack_lock);
h = __ip_conntrack_find(tuple, ignored_conntrack);
-READ_UNLOCK(&ip_conntrack_lock);
+read_unlock_bh(&ip_conntrack_lock);
return h != NULL;
}
@@ -419,13 +419,13 @@ static int early_drop(struct list_head *chain)
struct ip_conntrack *ct = NULL;
int dropped = 0;
-READ_LOCK(&ip_conntrack_lock);
+read_lock_bh(&ip_conntrack_lock);
h = LIST_FIND_B(chain, unreplied, struct ip_conntrack_tuple_hash *);
if (h) {
ct = tuplehash_to_ctrack(h);
atomic_inc(&ct->ct_general.use);
}
-READ_UNLOCK(&ip_conntrack_lock);
+read_unlock_bh(&ip_conntrack_lock);
if (!ct)
return dropped;
@@ -508,7 +508,7 @@ init_conntrack(const struct ip_conntrack_tuple *tuple,
conntrack->timeout.data = (unsigned long)conntrack;
conntrack->timeout.function = death_by_timeout;
-WRITE_LOCK(&ip_conntrack_lock);
+write_lock_bh(&ip_conntrack_lock);
exp = find_expectation(tuple);
if (exp) {
@@ -532,7 +532,7 @@ init_conntrack(const struct ip_conntrack_tuple *tuple,
list_add(&conntrack->tuplehash[IP_CT_DIR_ORIGINAL].list, &unconfirmed);
atomic_inc(&ip_conntrack_count);
-WRITE_UNLOCK(&ip_conntrack_lock);
+write_unlock_bh(&ip_conntrack_lock);
if (exp) {
if (exp->expectfn)
@@ -723,17 +723,17 @@ void ip_conntrack_unexpect_related(struct ip_conntrack_expect *exp)
{
struct ip_conntrack_expect *i;
-WRITE_LOCK(&ip_conntrack_lock);
+write_lock_bh(&ip_conntrack_lock);
/* choose the oldest expectation to evict */
list_for_each_entry_reverse(i, &ip_conntrack_expect_list, list) {
if (expect_matches(i, exp) && del_timer(&i->timeout)) {
unlink_expect(i);
-WRITE_UNLOCK(&ip_conntrack_lock);
+write_unlock_bh(&ip_conntrack_lock);
destroy_expect(i);
return;
}
}
-WRITE_UNLOCK(&ip_conntrack_lock);
+write_unlock_bh(&ip_conntrack_lock);
}
struct ip_conntrack_expect *ip_conntrack_expect_alloc(void)
@@ -808,7 +808,7 @@ int ip_conntrack_expect_related(struct ip_conntrack_expect *expect)
DEBUGP("tuple: "); DUMP_TUPLE(&expect->tuple);
DEBUGP("mask: "); DUMP_TUPLE(&expect->mask);
-WRITE_LOCK(&ip_conntrack_lock);
+write_lock_bh(&ip_conntrack_lock);
list_for_each_entry(i, &ip_conntrack_expect_list, list) {
if (expect_matches(i, expect)) {
/* Refresh timer: if it's dying, ignore.. */
@@ -832,7 +832,7 @@ int ip_conntrack_expect_related(struct ip_conntrack_expect *expect)
ip_conntrack_expect_insert(expect);
ret = 0;
out:
-WRITE_UNLOCK(&ip_conntrack_lock);
+write_unlock_bh(&ip_conntrack_lock);
return ret;
}
@@ -841,7 +841,7 @@ out:
void ip_conntrack_alter_reply(struct ip_conntrack *conntrack,
const struct ip_conntrack_tuple *newreply)
{
-WRITE_LOCK(&ip_conntrack_lock);
+write_lock_bh(&ip_conntrack_lock);
/* Should be unconfirmed, so not in hash table yet */
IP_NF_ASSERT(!is_confirmed(conntrack));
@@ -851,15 +851,15 @@ void ip_conntrack_alter_reply(struct ip_conntrack *conntrack,
conntrack->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
if (!conntrack->master && conntrack->expecting == 0)
conntrack->helper = ip_ct_find_helper(newreply);
-WRITE_UNLOCK(&ip_conntrack_lock);
+write_unlock_bh(&ip_conntrack_lock);
}
int ip_conntrack_helper_register(struct ip_conntrack_helper *me)
{
BUG_ON(me->timeout == 0);
-WRITE_LOCK(&ip_conntrack_lock);
+write_lock_bh(&ip_conntrack_lock);
list_prepend(&helpers, me);
-WRITE_UNLOCK(&ip_conntrack_lock);
+write_unlock_bh(&ip_conntrack_lock);
return 0;
}
@@ -878,7 +878,7 @@ void ip_conntrack_helper_unregister(struct ip_conntrack_helper *me)
struct ip_conntrack_expect *exp, *tmp;
/* Need write lock here, to delete helper. */
-WRITE_LOCK(&ip_conntrack_lock);
+write_lock_bh(&ip_conntrack_lock);
LIST_DELETE(&helpers, me);
/* Get rid of expectations */
@@ -893,7 +893,7 @@ void ip_conntrack_helper_unregister(struct ip_conntrack_helper *me)
for (i = 0; i < ip_conntrack_htable_size; i++)
LIST_FIND_W(&ip_conntrack_hash[i], unhelp,
struct ip_conntrack_tuple_hash *, me);
-WRITE_UNLOCK(&ip_conntrack_lock);
+write_unlock_bh(&ip_conntrack_lock);
/* Someone could be still looking at the helper in a bh. */
synchronize_net();
@@ -925,14 +925,14 @@ void ip_ct_refresh_acct(struct ip_conntrack *ct,
ct->timeout.expires = extra_jiffies;
ct_add_counters(ct, ctinfo, skb);
} else {
-WRITE_LOCK(&ip_conntrack_lock);
+write_lock_bh(&ip_conntrack_lock);
/* Need del_timer for race avoidance (may already be dying). */
if (del_timer(&ct->timeout)) {
ct->timeout.expires = jiffies + extra_jiffies;
add_timer(&ct->timeout);
}
ct_add_counters(ct, ctinfo, skb);
-WRITE_UNLOCK(&ip_conntrack_lock);
+write_unlock_bh(&ip_conntrack_lock);
}
}
@@ -997,7 +997,7 @@ get_next_corpse(int (*iter)(struct ip_conntrack *i, void *data),
{
struct ip_conntrack_tuple_hash *h = NULL;
-WRITE_LOCK(&ip_conntrack_lock);
+write_lock_bh(&ip_conntrack_lock);
for (; *bucket < ip_conntrack_htable_size; (*bucket)++) {
h = LIST_FIND_W(&ip_conntrack_hash[*bucket], do_iter,
struct ip_conntrack_tuple_hash *, iter, data);
@@ -1009,7 +1009,7 @@ get_next_corpse(int (*iter)(struct ip_conntrack *i, void *data),
struct ip_conntrack_tuple_hash *, iter, data);
if (h)
atomic_inc(&tuplehash_to_ctrack(h)->ct_general.use);
-WRITE_UNLOCK(&ip_conntrack_lock);
+write_unlock_bh(&ip_conntrack_lock);
return h;
}
@@ -1201,14 +1201,14 @@ int __init ip_conntrack_init(void)
}
/* Don't NEED lock here, but good form anyway. */
-WRITE_LOCK(&ip_conntrack_lock);
+write_lock_bh(&ip_conntrack_lock);
for (i = 0; i < MAX_IP_CT_PROTO; i++)
ip_ct_protos[i] = &ip_conntrack_generic_protocol;
/* Sew in builtin protocols. */
ip_ct_protos[IPPROTO_TCP] = &ip_conntrack_protocol_tcp;
ip_ct_protos[IPPROTO_UDP] = &ip_conntrack_protocol_udp;
ip_ct_protos[IPPROTO_ICMP] = &ip_conntrack_protocol_icmp;
-WRITE_UNLOCK(&ip_conntrack_lock);
+write_unlock_bh(&ip_conntrack_lock);
for (i = 0; i < ip_conntrack_htable_size; i++)
INIT_LIST_HEAD(&ip_conntrack_hash[i]);
......
@@ -16,7 +16,6 @@
#include <net/checksum.h>
#include <net/tcp.h>
-#include <linux/netfilter_ipv4/lockhelp.h>
#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
#include <linux/netfilter_ipv4/ip_conntrack_ftp.h>
#include <linux/moduleparam.h>
@@ -28,7 +27,7 @@ MODULE_DESCRIPTION("ftp connection tracking helper");
/* This is slow, but it's simple. --RR */
static char ftp_buffer[65536];
-static DECLARE_LOCK(ip_ftp_lock);
+static DEFINE_SPINLOCK(ip_ftp_lock);
#define MAX_PORTS 8
static int ports[MAX_PORTS];
@@ -319,7 +318,7 @@ static int help(struct sk_buff **pskb,
}
datalen = (*pskb)->len - dataoff;
-LOCK_BH(&ip_ftp_lock);
+spin_lock_bh(&ip_ftp_lock);
fb_ptr = skb_header_pointer(*pskb, dataoff,
(*pskb)->len - dataoff, ftp_buffer);
BUG_ON(fb_ptr == NULL);
@@ -442,7 +441,7 @@ out_update_nl:
if (ends_in_nl)
update_nl_seq(seq, ct_ftp_info,dir);
out:
-UNLOCK_BH(&ip_ftp_lock);
+spin_unlock_bh(&ip_ftp_lock);
return ret;
}
......
@@ -29,7 +29,6 @@
#include <net/checksum.h>
#include <net/tcp.h>
-#include <linux/netfilter_ipv4/lockhelp.h>
#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
#include <linux/netfilter_ipv4/ip_conntrack_irc.h>
#include <linux/moduleparam.h>
@@ -41,7 +40,7 @@ static int max_dcc_channels = 8;
static unsigned int dcc_timeout = 300;
/* This is slow, but it's simple. --RR */
static char irc_buffer[65536];
-static DECLARE_LOCK(irc_buffer_lock);
+static DEFINE_SPINLOCK(irc_buffer_lock);
unsigned int (*ip_nat_irc_hook)(struct sk_buff **pskb,
enum ip_conntrack_info ctinfo,
@@ -141,7 +140,7 @@ static int help(struct sk_buff **pskb,
if (dataoff >= (*pskb)->len)
return NF_ACCEPT;
-LOCK_BH(&irc_buffer_lock);
+spin_lock_bh(&irc_buffer_lock);
ib_ptr = skb_header_pointer(*pskb, dataoff,
(*pskb)->len - dataoff, irc_buffer);
BUG_ON(ib_ptr == NULL);
@@ -237,7 +236,7 @@ static int help(struct sk_buff **pskb,
} /* while data < ... */
out:
-UNLOCK_BH(&irc_buffer_lock);
+spin_unlock_bh(&irc_buffer_lock);
return ret;
}
......
@@ -26,7 +26,6 @@
#include <linux/netfilter_ipv4/ip_conntrack.h>
#include <linux/netfilter_ipv4/ip_conntrack_protocol.h>
-#include <linux/netfilter_ipv4/lockhelp.h>
#if 0
#define DEBUGP(format, ...) printk(format, ## __VA_ARGS__)
@@ -35,7 +34,7 @@
#endif
/* Protects conntrack->proto.sctp */
-static DECLARE_RWLOCK(sctp_lock);
+static DEFINE_RWLOCK(sctp_lock);
/* FIXME: Examine ipfilter's timeouts and conntrack transitions more
closely. They're more complex. --RR
@@ -199,9 +198,9 @@ static int sctp_print_conntrack(struct seq_file *s,
DEBUGP(__FUNCTION__);
DEBUGP("\n");
-READ_LOCK(&sctp_lock);
+read_lock_bh(&sctp_lock);
state = conntrack->proto.sctp.state;
-READ_UNLOCK(&sctp_lock);
+read_unlock_bh(&sctp_lock);
return seq_printf(s, "%s ", sctp_conntrack_names[state]);
}
@@ -343,13 +342,13 @@ static int sctp_packet(struct ip_conntrack *conntrack,
oldsctpstate = newconntrack = SCTP_CONNTRACK_MAX;
for_each_sctp_chunk (skb, sch, _sch, offset, count) {
-WRITE_LOCK(&sctp_lock);