Commit 547b792c authored by Ilpo Järvinen, committed by David S. Miller

net: convert BUG_TRAP to generic WARN_ON



Removes a legacy reinvent-the-wheel type thing. The generic
machinery integrates much better with automated debugging aids
such as kerneloops.org (and others), and is unambiguous thanks to
its clearer naming. Non-intuitively, BUG_TRAP() is actually equivalent
to WARN_ON() rather than BUG_ON(), though some call sites could
arguably be promoted to BUG_ON(); I left that for the future.
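
For reference, the legacy macro only printed an error when the
assertion failed; it never halted the kernel the way BUG_ON() does.
A sketch of its historical definition (from include/linux/net.h,
reproduced here from memory, so treat the exact wording as
approximate):

	/* Legacy assertion helper: warn and keep running. */
	#define BUG_TRAP(x) do { \
		if (unlikely(!(x))) { \
			printk(KERN_ERR "KERNEL: assertion (" #x ") " \
			       "failed at %s (%d)\n", __FILE__, __LINE__); \
		} \
	} while (0)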

I was also able to convert at least one site to BUILD_BUG_ON.
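
Note that the conversion inverts each asserted condition, because
BUG_TRAP() fired when its argument was false while WARN_ON() fires
when its argument is true. The rewrite pattern, using a pair taken
from the dccp hunks below:

	/* Before: complain if the invariant does NOT hold. */
	BUG_TRAP(sk->sk_send_head != NULL);

	/* After: complain if the failure condition DOES hold. Double
	 * negations are simplified at the same time, e.g.
	 * BUG_TRAP(!dev->master) becomes WARN_ON(dev->master). */
	WARN_ON(sk->sk_send_head == NULL);
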
Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 53e5e96e
@@ -18,6 +18,7 @@
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/types.h>
+#include <linux/bug.h>
 #include <net/sock.h>
@@ -170,7 +171,7 @@ static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue
 {
 	struct request_sock *req = queue->rskq_accept_head;
-	BUG_TRAP(req != NULL);
+	WARN_ON(req == NULL);
 	queue->rskq_accept_head = req->dl_next;
 	if (queue->rskq_accept_head == NULL)
@@ -185,7 +186,7 @@ static inline struct sock *reqsk_queue_get_child(struct request_sock_queue *queu
 	struct request_sock *req = reqsk_queue_remove(queue);
 	struct sock *child = req->sk;
-	BUG_TRAP(child != NULL);
+	WARN_ON(child == NULL);
 	sk_acceptq_removed(parent);
 	__reqsk_free(req);
...
@@ -959,7 +959,7 @@ static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset,
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		int end;
-		BUG_TRAP(start <= offset + len);
+		WARN_ON(start > offset + len);
 		end = start + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
@@ -986,7 +986,7 @@ static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset,
 	for (; list; list = list->next) {
 		int end;
-		BUG_TRAP(start <= offset + len);
+		WARN_ON(start > offset + len);
 		end = start + list->len;
 		if ((copy = end - offset) > 0) {
...
@@ -285,7 +285,7 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		int end;
-		BUG_TRAP(start <= offset + len);
+		WARN_ON(start > offset + len);
 		end = start + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
@@ -315,7 +315,7 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
 	for (; list; list = list->next) {
 		int end;
-		BUG_TRAP(start <= offset + len);
+		WARN_ON(start > offset + len);
 		end = start + list->len;
 		if ((copy = end - offset) > 0) {
@@ -366,7 +366,7 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		int end;
-		BUG_TRAP(start <= offset + len);
+		WARN_ON(start > offset + len);
 		end = start + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
@@ -402,7 +402,7 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
 	for (; list; list=list->next) {
 		int end;
-		BUG_TRAP(start <= offset + len);
+		WARN_ON(start > offset + len);
 		end = start + list->len;
 		if ((copy = end - offset) > 0) {
...
@@ -1973,7 +1973,7 @@ static void net_tx_action(struct softirq_action *h)
 			struct sk_buff *skb = clist;
 			clist = clist->next;
-			BUG_TRAP(!atomic_read(&skb->users));
+			WARN_ON(atomic_read(&skb->users));
 			__kfree_skb(skb);
 		}
 	}
@@ -3847,7 +3847,7 @@ static void rollback_registered(struct net_device *dev)
 		dev->uninit(dev);
 	/* Notifier chain MUST detach us from master device. */
-	BUG_TRAP(!dev->master);
+	WARN_ON(dev->master);
 	/* Remove entries from kobject tree */
 	netdev_unregister_kobject(dev);
@@ -4169,9 +4169,9 @@ void netdev_run_todo(void)
 		/* paranoia */
 		BUG_ON(atomic_read(&dev->refcnt));
-		BUG_TRAP(!dev->ip_ptr);
-		BUG_TRAP(!dev->ip6_ptr);
-		BUG_TRAP(!dev->dn_ptr);
+		WARN_ON(dev->ip_ptr);
+		WARN_ON(dev->ip6_ptr);
+		WARN_ON(dev->dn_ptr);
 		if (dev->destructor)
 			dev->destructor(dev);
...
@@ -123,7 +123,7 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
 		}
 	}
-	BUG_TRAP(lopt->qlen == 0);
+	WARN_ON(lopt->qlen != 0);
 	if (lopt_size > PAGE_SIZE)
 		vfree(lopt);
 	else
...
@@ -1200,7 +1200,7 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		int end;
-		BUG_TRAP(start <= offset + len);
+		WARN_ON(start > offset + len);
 		end = start + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
@@ -1229,7 +1229,7 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 	for (; list; list = list->next) {
 		int end;
-		BUG_TRAP(start <= offset + len);
+		WARN_ON(start > offset + len);
 		end = start + list->len;
 		if ((copy = end - offset) > 0) {
@@ -1475,7 +1475,7 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 		int end;
-		BUG_TRAP(start <= offset + len);
+		WARN_ON(start > offset + len);
 		end = start + frag->size;
 		if ((copy = end - offset) > 0) {
@@ -1503,7 +1503,7 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
 	for (; list; list = list->next) {
 		int end;
-		BUG_TRAP(start <= offset + len);
+		WARN_ON(start > offset + len);
 		end = start + list->len;
 		if ((copy = end - offset) > 0) {
@@ -1552,7 +1552,7 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		int end;
-		BUG_TRAP(start <= offset + len);
+		WARN_ON(start > offset + len);
 		end = start + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
@@ -1581,7 +1581,7 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
 	for (; list; list = list->next) {
 		int end;
-		BUG_TRAP(start <= offset + len);
+		WARN_ON(start > offset + len);
 		end = start + list->len;
 		if ((copy = end - offset) > 0) {
@@ -1629,7 +1629,7 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		int end;
-		BUG_TRAP(start <= offset + len);
+		WARN_ON(start > offset + len);
 		end = start + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
@@ -1662,7 +1662,7 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 		__wsum csum2;
 		int end;
-		BUG_TRAP(start <= offset + len);
+		WARN_ON(start > offset + len);
 		end = start + list->len;
 		if ((copy = end - offset) > 0) {
@@ -2373,7 +2373,7 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		int end;
-		BUG_TRAP(start <= offset + len);
+		WARN_ON(start > offset + len);
 		end = start + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
@@ -2397,7 +2397,7 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
 	for (; list; list = list->next) {
 		int end;
-		BUG_TRAP(start <= offset + len);
+		WARN_ON(start > offset + len);
 		end = start + list->len;
 		if ((copy = end - offset) > 0) {
...
@@ -192,13 +192,13 @@ void sk_stream_kill_queues(struct sock *sk)
 	__skb_queue_purge(&sk->sk_error_queue);
 	/* Next, the write queue. */
-	BUG_TRAP(skb_queue_empty(&sk->sk_write_queue));
+	WARN_ON(!skb_queue_empty(&sk->sk_write_queue));
 	/* Account for returned memory. */
 	sk_mem_reclaim(sk);
-	BUG_TRAP(!sk->sk_wmem_queued);
-	BUG_TRAP(!sk->sk_forward_alloc);
+	WARN_ON(sk->sk_wmem_queued);
+	WARN_ON(sk->sk_forward_alloc);
 	/* It is _impossible_ for the backlog to contain anything
 	 * when we get here. All user references to this socket
...
@@ -27,7 +27,6 @@
 #include <linux/dmaengine.h>
 #include <linux/socket.h>
-#include <linux/rtnetlink.h> /* for BUG_TRAP */
 #include <net/tcp.h>
 #include <net/netdma.h>
@@ -71,7 +70,7 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		int end;
-		BUG_TRAP(start <= offset + len);
+		WARN_ON(start > offset + len);
 		end = start + skb_shinfo(skb)->frags[i].size;
 		copy = end - offset;
@@ -100,7 +99,7 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
 	for (; list; list = list->next) {
 		int end;
-		BUG_TRAP(start <= offset + len);
+		WARN_ON(start > offset + len);
 		end = start + list->len;
 		copy = end - offset;
...
@@ -164,7 +164,7 @@ static inline bool dccp_loss_free(const u64 s1, const u64 s2, const u64 ndp)
 {
 	s64 delta = dccp_delta_seqno(s1, s2);
-	BUG_TRAP(delta >= 0);
+	WARN_ON(delta < 0);
 	return (u64)delta <= ndp + 1;
 }
...
@@ -413,7 +413,7 @@ static int dccp_rcv_request_sent_state_process(struct sock *sk,
 		/* Stop the REQUEST timer */
 		inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
-		BUG_TRAP(sk->sk_send_head != NULL);
+		WARN_ON(sk->sk_send_head == NULL);
 		__kfree_skb(sk->sk_send_head);
 		sk->sk_send_head = NULL;
...
@@ -283,7 +283,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
 		 * ICMPs are not backlogged, hence we cannot get an established
 		 * socket here.
 		 */
-		BUG_TRAP(!req->sk);
+		WARN_ON(req->sk);
 		if (seq != dccp_rsk(req)->dreq_iss) {
 			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
...
@@ -186,7 +186,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 		 * ICMPs are not backlogged, hence we cannot get an established
 		 * socket here.
 		 */
-		BUG_TRAP(req->sk == NULL);
+		WARN_ON(req->sk != NULL);
 		if (seq != dccp_rsk(req)->dreq_iss) {
 			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
...
@@ -327,7 +327,7 @@ int dccp_disconnect(struct sock *sk, int flags)
 	inet_csk_delack_init(sk);
 	__sk_dst_reset(sk);
-	BUG_TRAP(!inet->num || icsk->icsk_bind_hash);
+	WARN_ON(inet->num && !icsk->icsk_bind_hash);
 	sk->sk_error_report(sk);
 	return err;
@@ -981,7 +981,7 @@ adjudge_to_death:
 	 */
 	local_bh_disable();
 	bh_lock_sock(sk);
-	BUG_TRAP(!sock_owned_by_user(sk));
+	WARN_ON(sock_owned_by_user(sk));
 	/* Have we already been destroyed by a softirq or backlog? */
 	if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED)
...
@@ -106,7 +106,7 @@ static void dccp_retransmit_timer(struct sock *sk)
 	 * -- Acks in client-PARTOPEN state (sec. 8.1.5)
 	 * -- CloseReq in server-CLOSEREQ state (sec. 8.3)
 	 * -- Close in node-CLOSING state (sec. 8.3) */
-	BUG_TRAP(sk->sk_send_head != NULL);
+	WARN_ON(sk->sk_send_head == NULL);
 	/*
 	 * More than than 4MSL (8 minutes) has passed, a RESET(aborted) was
...
@@ -148,10 +148,10 @@ void inet_sock_destruct(struct sock *sk)
 		return;
 	}
-	BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
-	BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
-	BUG_TRAP(!sk->sk_wmem_queued);
-	BUG_TRAP(!sk->sk_forward_alloc);
+	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
+	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
+	WARN_ON(sk->sk_wmem_queued);
+	WARN_ON(sk->sk_forward_alloc);
 	kfree(inet->opt);
 	dst_release(sk->sk_dst_cache);
@@ -341,7 +341,7 @@ lookup_protocol:
 	answer_flags = answer->flags;
 	rcu_read_unlock();
-	BUG_TRAP(answer_prot->slab != NULL);
+	WARN_ON(answer_prot->slab == NULL);
 	err = -ENOBUFS;
 	sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot);
@@ -661,8 +661,8 @@ int inet_accept(struct socket *sock, struct socket *newsock, int flags)
 	lock_sock(sk2);
-	BUG_TRAP((1 << sk2->sk_state) &
-		 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_CLOSE));
+	WARN_ON(!((1 << sk2->sk_state) &
+		  (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_CLOSE)));
 	sock_graft(sk2, newsock);
...
@@ -138,8 +138,8 @@ void in_dev_finish_destroy(struct in_device *idev)
 {
 	struct net_device *dev = idev->dev;
-	BUG_TRAP(!idev->ifa_list);
-	BUG_TRAP(!idev->mc_list);
+	WARN_ON(idev->ifa_list);
+	WARN_ON(idev->mc_list);
 #ifdef NET_REFCNT_DEBUG
 	printk(KERN_DEBUG "in_dev_finish_destroy: %p=%s\n",
 	       idev, dev ? dev->name : "NIL");
@@ -399,7 +399,7 @@ static int inet_set_ifa(struct net_device *dev, struct in_ifaddr *ifa)
 	}
 	ipv4_devconf_setall(in_dev);
 	if (ifa->ifa_dev != in_dev) {
-		BUG_TRAP(!ifa->ifa_dev);
+		WARN_ON(ifa->ifa_dev);
 		in_dev_hold(in_dev);
 		ifa->ifa_dev = in_dev;
 	}
...
@@ -167,7 +167,7 @@ tb_not_found:
 success:
 	if (!inet_csk(sk)->icsk_bind_hash)
 		inet_bind_hash(sk, tb, snum);
-	BUG_TRAP(inet_csk(sk)->icsk_bind_hash == tb);
+	WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
 	ret = 0;
fail_unlock:
@@ -260,7 +260,7 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
 	}
 	newsk = reqsk_queue_get_child(&icsk->icsk_accept_queue, sk);
-	BUG_TRAP(newsk->sk_state != TCP_SYN_RECV);
+	WARN_ON(newsk->sk_state == TCP_SYN_RECV);
out:
 	release_sock(sk);
 	return newsk;
@@ -386,7 +386,7 @@ struct request_sock *inet_csk_search_req(const struct sock *sk,
 		    ireq->rmt_addr == raddr &&
 		    ireq->loc_addr == laddr &&
 		    AF_INET_FAMILY(req->rsk_ops->family)) {
-			BUG_TRAP(!req->sk);
+			WARN_ON(req->sk);
 			*prevp = prev;
 			break;
 		}
@@ -539,14 +539,14 @@ EXPORT_SYMBOL_GPL(inet_csk_clone);
  */
 void inet_csk_destroy_sock(struct sock *sk)
 {
-	BUG_TRAP(sk->sk_state == TCP_CLOSE);
-	BUG_TRAP(sock_flag(sk, SOCK_DEAD));
+	WARN_ON(sk->sk_state != TCP_CLOSE);
+	WARN_ON(!sock_flag(sk, SOCK_DEAD));
 	/* It cannot be in hash table! */
-	BUG_TRAP(sk_unhashed(sk));
+	WARN_ON(!sk_unhashed(sk));
 	/* If it has not 0 inet_sk(sk)->num, it must be bound */
-	BUG_TRAP(!inet_sk(sk)->num || inet_csk(sk)->icsk_bind_hash);
+	WARN_ON(inet_sk(sk)->num && !inet_csk(sk)->icsk_bind_hash);
 	sk->sk_prot->destroy(sk);
@@ -629,7 +629,7 @@ void inet_csk_listen_stop(struct sock *sk)
 		local_bh_disable();
 		bh_lock_sock(child);
-		BUG_TRAP(!sock_owned_by_user(child));
+		WARN_ON(sock_owned_by_user(child));
 		sock_hold(child);
 		sk->sk_prot->disconnect(child, O_NONBLOCK);
@@ -647,7 +647,7 @@ void inet_csk_listen_stop(struct sock *sk)
 		sk_acceptq_removed(sk);
 		__reqsk_free(req);
 	}
-	BUG_TRAP(!sk->sk_ack_backlog);
+	WARN_ON(sk->sk_ack_backlog);
 }
 EXPORT_SYMBOL_GPL(inet_csk_listen_stop);
...
@@ -134,8 +134,8 @@ void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f,
 	struct sk_buff *fp;
 	struct netns_frags *nf;
-	BUG_TRAP(q->last_in & INET_FRAG_COMPLETE);
-	BUG_TRAP(del_timer(&q->timer) == 0);
+	WARN_ON(!(q->last_in & INET_FRAG_COMPLETE));
+	WARN_ON(del_timer(&q->timer) != 0);
 	/* Release all fragment data. */
 	fp = q->fragments;
...
@@ -305,7 +305,7 @@ unique:
 	inet->num = lport;
 	inet->sport = htons(lport);
 	sk->sk_hash = hash;
-	BUG_TRAP(sk_unhashed(sk));
+	WARN_ON(!sk_unhashed(sk));
 	__sk_add_node(sk, &head->chain);
 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
 	write_unlock(lock);
@@ -342,7 +342,7 @@ void __inet_hash_nolisten(struct sock *sk)
 	rwlock_t *lock;
 	struct inet_ehash_bucket *head;
-	BUG_TRAP(sk_unhashed(sk));
+	WARN_ON(!sk_unhashed(sk));
 	sk->sk_hash = inet_sk_ehashfn(sk);
 	head = inet_ehash_bucket(hashinfo, sk->sk_hash);
@@ -367,7 +367,7 @@ static void __inet_hash(struct sock *sk)
 		return;
 	}
-	BUG_TRAP(sk_unhashed(sk));
+	WARN_ON(!sk_unhashed(sk));
 	list = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
 	lock = &hashinfo->lhash_lock;
@@ -450,7 +450,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
 		 */
 		inet_bind_bucket_for_each(tb, node, &head->chain) {
 			if (tb->ib_net == net && tb->port == port) {
-				BUG_TRAP(!hlist_empty(&tb->owners));
+				WARN_ON(hlist_empty(&tb->owners));
 				if (tb->fastreuse >= 0)
 					goto next_port;
 				if (!check_established(death_row, sk,
...
@@ -86,7 +86,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
 			hashinfo->bhash_size)];
 	spin_lock(&bhead->lock);
 	tw->tw_tb = icsk->icsk_bind_hash;
-	BUG_TRAP(icsk->icsk_bind_hash);
+	WARN_ON(!icsk->icsk_bind_hash);
 	inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
 	spin_unlock(&bhead->lock);
...