net: convert BUG_TRAP to generic WARN_ON

Removes a legacy reinvent-the-wheel construct. The generic machinery
integrates much better with automated debugging aids such as
kerneloops.org (and others), and is unambiguous thanks to better
naming. Non-intuitively, BUG_TRAP() is actually equivalent to
WARN_ON() rather than BUG_ON(), so every converted condition gets
inverted; some call sites could arguably be promoted to BUG_ON(),
but I left that for the future.
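
To make the semantics concrete, here is a rough user-space sketch of
the two macros. These are illustrative stand-ins only (the historical
BUG_TRAP() lived in <linux/rtnetlink.h>; the WARN_ON() stand-in mimics
just the condition handling, not the real kernel implementation):

    #include <stdio.h>

    /* Stand-ins for illustration: BUG_TRAP(x) complained when the
     * asserted condition x was FALSE, while WARN_ON(x) warns when x
     * is TRUE -- hence every converted condition is inverted. */
    #define BUG_TRAP(x) \
            do { if (!(x)) printf("assertion (%s) failed\n", #x); } while (0)
    #define WARN_ON(x) \
            do { if (x) printf("WARNING: %s\n", #x); } while (0)

    int main(void)
    {
            void *req = NULL;

            BUG_TRAP(req != NULL);  /* old style: assert what must hold */
            WARN_ON(req == NULL);   /* new style: warn on the bad case  */
            return 0;
    }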

I was also able to convert at least one check into a compile-time
BUILD_BUG_ON.
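
For illustration, a minimal user-space sketch of the BUILD_BUG_ON idea;
the struct layout and the negative-array-size trick below are simplified
assumptions, not the kernel's exact definitions. A condition known at
compile time fails the build instead of being checked at runtime:

    /* Simplified stand-in for the kernel macro: a true condition yields
     * a negative array size and therefore a compile error. */
    #define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2 * !!(condition)]))

    struct hao_example {            /* assumed layout: 1 + 1 + 16 bytes */
            unsigned char type;
            unsigned char length;
            unsigned char addr[16];
    } __attribute__((packed));

    int main(void)
    {
            /* Breaks the build if the option size ever changes. */
            BUILD_BUG_ON(sizeof(struct hao_example) != 18);
            return 0;
    }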

Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
Ilpo Järvinen, 2008-07-25 21:43:18 -07:00; committed by David S. Miller
Parent: 53e5e96ec1
Commit: 547b792cac
51 changed files with 159 additions and 155 deletions

@@ -18,6 +18,7 @@
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/types.h>
+#include <linux/bug.h>
 #include <net/sock.h>
@@ -170,7 +171,7 @@ static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue
 {
 struct request_sock *req = queue->rskq_accept_head;
-BUG_TRAP(req != NULL);
+WARN_ON(req == NULL);
 queue->rskq_accept_head = req->dl_next;
 if (queue->rskq_accept_head == NULL)
@@ -185,7 +186,7 @@ static inline struct sock *reqsk_queue_get_child(struct request_sock_queue *queu
 struct request_sock *req = reqsk_queue_remove(queue);
 struct sock *child = req->sk;
-BUG_TRAP(child != NULL);
+WARN_ON(child == NULL);
 sk_acceptq_removed(parent);
 __reqsk_free(req);

@@ -959,7 +959,7 @@ static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset,
 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 int end;
-BUG_TRAP(start <= offset + len);
+WARN_ON(start > offset + len);
 end = start + skb_shinfo(skb)->frags[i].size;
 if ((copy = end - offset) > 0) {
@@ -986,7 +986,7 @@ static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset,
 for (; list; list = list->next) {
 int end;
-BUG_TRAP(start <= offset + len);
+WARN_ON(start > offset + len);
 end = start + list->len;
 if ((copy = end - offset) > 0) {

@@ -285,7 +285,7 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 int end;
-BUG_TRAP(start <= offset + len);
+WARN_ON(start > offset + len);
 end = start + skb_shinfo(skb)->frags[i].size;
 if ((copy = end - offset) > 0) {
@@ -315,7 +315,7 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
 for (; list; list = list->next) {
 int end;
-BUG_TRAP(start <= offset + len);
+WARN_ON(start > offset + len);
 end = start + list->len;
 if ((copy = end - offset) > 0) {
@@ -366,7 +366,7 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 int end;
-BUG_TRAP(start <= offset + len);
+WARN_ON(start > offset + len);
 end = start + skb_shinfo(skb)->frags[i].size;
 if ((copy = end - offset) > 0) {
@@ -402,7 +402,7 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
 for (; list; list=list->next) {
 int end;
-BUG_TRAP(start <= offset + len);
+WARN_ON(start > offset + len);
 end = start + list->len;
 if ((copy = end - offset) > 0) {

@@ -1973,7 +1973,7 @@ static void net_tx_action(struct softirq_action *h)
 struct sk_buff *skb = clist;
 clist = clist->next;
-BUG_TRAP(!atomic_read(&skb->users));
+WARN_ON(atomic_read(&skb->users));
 __kfree_skb(skb);
 }
 }
@@ -3847,7 +3847,7 @@ static void rollback_registered(struct net_device *dev)
 dev->uninit(dev);
 /* Notifier chain MUST detach us from master device. */
-BUG_TRAP(!dev->master);
+WARN_ON(dev->master);
 /* Remove entries from kobject tree */
 netdev_unregister_kobject(dev);
@@ -4169,9 +4169,9 @@ void netdev_run_todo(void)
 /* paranoia */
 BUG_ON(atomic_read(&dev->refcnt));
-BUG_TRAP(!dev->ip_ptr);
-BUG_TRAP(!dev->ip6_ptr);
-BUG_TRAP(!dev->dn_ptr);
+WARN_ON(dev->ip_ptr);
+WARN_ON(dev->ip6_ptr);
+WARN_ON(dev->dn_ptr);
 if (dev->destructor)
 dev->destructor(dev);

@@ -123,7 +123,7 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
 }
 }
-BUG_TRAP(lopt->qlen == 0);
+WARN_ON(lopt->qlen != 0);
 if (lopt_size > PAGE_SIZE)
 vfree(lopt);
 else

@@ -1200,7 +1200,7 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 int end;
-BUG_TRAP(start <= offset + len);
+WARN_ON(start > offset + len);
 end = start + skb_shinfo(skb)->frags[i].size;
 if ((copy = end - offset) > 0) {
@@ -1229,7 +1229,7 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 for (; list; list = list->next) {
 int end;
-BUG_TRAP(start <= offset + len);
+WARN_ON(start > offset + len);
 end = start + list->len;
 if ((copy = end - offset) > 0) {
@@ -1475,7 +1475,7 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 int end;
-BUG_TRAP(start <= offset + len);
+WARN_ON(start > offset + len);
 end = start + frag->size;
 if ((copy = end - offset) > 0) {
@@ -1503,7 +1503,7 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
 for (; list; list = list->next) {
 int end;
-BUG_TRAP(start <= offset + len);
+WARN_ON(start > offset + len);
 end = start + list->len;
 if ((copy = end - offset) > 0) {
@@ -1552,7 +1552,7 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 int end;
-BUG_TRAP(start <= offset + len);
+WARN_ON(start > offset + len);
 end = start + skb_shinfo(skb)->frags[i].size;
 if ((copy = end - offset) > 0) {
@@ -1581,7 +1581,7 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
 for (; list; list = list->next) {
 int end;
-BUG_TRAP(start <= offset + len);
+WARN_ON(start > offset + len);
 end = start + list->len;
 if ((copy = end - offset) > 0) {
@@ -1629,7 +1629,7 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 int end;
-BUG_TRAP(start <= offset + len);
+WARN_ON(start > offset + len);
 end = start + skb_shinfo(skb)->frags[i].size;
 if ((copy = end - offset) > 0) {
@@ -1662,7 +1662,7 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 __wsum csum2;
 int end;
-BUG_TRAP(start <= offset + len);
+WARN_ON(start > offset + len);
 end = start + list->len;
 if ((copy = end - offset) > 0) {
@@ -2373,7 +2373,7 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 int end;
-BUG_TRAP(start <= offset + len);
+WARN_ON(start > offset + len);
 end = start + skb_shinfo(skb)->frags[i].size;
 if ((copy = end - offset) > 0) {
@@ -2397,7 +2397,7 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
 for (; list; list = list->next) {
 int end;
-BUG_TRAP(start <= offset + len);
+WARN_ON(start > offset + len);
 end = start + list->len;
 if ((copy = end - offset) > 0) {

@@ -192,13 +192,13 @@ void sk_stream_kill_queues(struct sock *sk)
 __skb_queue_purge(&sk->sk_error_queue);
 /* Next, the write queue. */
-BUG_TRAP(skb_queue_empty(&sk->sk_write_queue));
+WARN_ON(!skb_queue_empty(&sk->sk_write_queue));
 /* Account for returned memory. */
 sk_mem_reclaim(sk);
-BUG_TRAP(!sk->sk_wmem_queued);
-BUG_TRAP(!sk->sk_forward_alloc);
+WARN_ON(sk->sk_wmem_queued);
+WARN_ON(sk->sk_forward_alloc);
 /* It is _impossible_ for the backlog to contain anything
 * when we get here. All user references to this socket

@@ -27,7 +27,6 @@
 #include <linux/dmaengine.h>
 #include <linux/socket.h>
-#include <linux/rtnetlink.h> /* for BUG_TRAP */
 #include <net/tcp.h>
 #include <net/netdma.h>
@@ -71,7 +70,7 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 int end;
-BUG_TRAP(start <= offset + len);
+WARN_ON(start > offset + len);
 end = start + skb_shinfo(skb)->frags[i].size;
 copy = end - offset;
@@ -100,7 +99,7 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
 for (; list; list = list->next) {
 int end;
-BUG_TRAP(start <= offset + len);
+WARN_ON(start > offset + len);
 end = start + list->len;
 copy = end - offset;

@@ -164,7 +164,7 @@ static inline bool dccp_loss_free(const u64 s1, const u64 s2, const u64 ndp)
 {
 s64 delta = dccp_delta_seqno(s1, s2);
-BUG_TRAP(delta >= 0);
+WARN_ON(delta < 0);
 return (u64)delta <= ndp + 1;
 }

@@ -413,7 +413,7 @@ static int dccp_rcv_request_sent_state_process(struct sock *sk,
 /* Stop the REQUEST timer */
 inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
-BUG_TRAP(sk->sk_send_head != NULL);
+WARN_ON(sk->sk_send_head == NULL);
 __kfree_skb(sk->sk_send_head);
 sk->sk_send_head = NULL;

@@ -283,7 +283,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
 * ICMPs are not backlogged, hence we cannot get an established
 * socket here.
 */
-BUG_TRAP(!req->sk);
+WARN_ON(req->sk);
 if (seq != dccp_rsk(req)->dreq_iss) {
 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);

@@ -186,7 +186,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 * ICMPs are not backlogged, hence we cannot get an established
 * socket here.
 */
-BUG_TRAP(req->sk == NULL);
+WARN_ON(req->sk != NULL);
 if (seq != dccp_rsk(req)->dreq_iss) {
 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);

@@ -327,7 +327,7 @@ int dccp_disconnect(struct sock *sk, int flags)
 inet_csk_delack_init(sk);
 __sk_dst_reset(sk);
-BUG_TRAP(!inet->num || icsk->icsk_bind_hash);
+WARN_ON(inet->num && !icsk->icsk_bind_hash);
 sk->sk_error_report(sk);
 return err;
@@ -981,7 +981,7 @@ adjudge_to_death:
 */
 local_bh_disable();
 bh_lock_sock(sk);
-BUG_TRAP(!sock_owned_by_user(sk));
+WARN_ON(sock_owned_by_user(sk));
 /* Have we already been destroyed by a softirq or backlog? */
 if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED)

@@ -106,7 +106,7 @@ static void dccp_retransmit_timer(struct sock *sk)
 * -- Acks in client-PARTOPEN state (sec. 8.1.5)
 * -- CloseReq in server-CLOSEREQ state (sec. 8.3)
 * -- Close in node-CLOSING state (sec. 8.3) */
-BUG_TRAP(sk->sk_send_head != NULL);
+WARN_ON(sk->sk_send_head == NULL);
 /*
 * More than than 4MSL (8 minutes) has passed, a RESET(aborted) was

@@ -148,10 +148,10 @@ void inet_sock_destruct(struct sock *sk)
 return;
 }
-BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
-BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
-BUG_TRAP(!sk->sk_wmem_queued);
-BUG_TRAP(!sk->sk_forward_alloc);
+WARN_ON(atomic_read(&sk->sk_rmem_alloc));
+WARN_ON(atomic_read(&sk->sk_wmem_alloc));
+WARN_ON(sk->sk_wmem_queued);
+WARN_ON(sk->sk_forward_alloc);
 kfree(inet->opt);
 dst_release(sk->sk_dst_cache);
@@ -341,7 +341,7 @@ lookup_protocol:
 answer_flags = answer->flags;
 rcu_read_unlock();
-BUG_TRAP(answer_prot->slab != NULL);
+WARN_ON(answer_prot->slab == NULL);
 err = -ENOBUFS;
 sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot);
@@ -661,8 +661,8 @@ int inet_accept(struct socket *sock, struct socket *newsock, int flags)
 lock_sock(sk2);
-BUG_TRAP((1 << sk2->sk_state) &
-(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_CLOSE));
+WARN_ON(!((1 << sk2->sk_state) &
+(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_CLOSE)));
 sock_graft(sk2, newsock);

@@ -138,8 +138,8 @@ void in_dev_finish_destroy(struct in_device *idev)
 {
 struct net_device *dev = idev->dev;
-BUG_TRAP(!idev->ifa_list);
-BUG_TRAP(!idev->mc_list);
+WARN_ON(idev->ifa_list);
+WARN_ON(idev->mc_list);
 #ifdef NET_REFCNT_DEBUG
 printk(KERN_DEBUG "in_dev_finish_destroy: %p=%s\n",
 idev, dev ? dev->name : "NIL");
@@ -399,7 +399,7 @@ static int inet_set_ifa(struct net_device *dev, struct in_ifaddr *ifa)
 }
 ipv4_devconf_setall(in_dev);
 if (ifa->ifa_dev != in_dev) {
-BUG_TRAP(!ifa->ifa_dev);
+WARN_ON(ifa->ifa_dev);
 in_dev_hold(in_dev);
 ifa->ifa_dev = in_dev;
 }

@@ -167,7 +167,7 @@ tb_not_found:
 success:
 if (!inet_csk(sk)->icsk_bind_hash)
 inet_bind_hash(sk, tb, snum);
-BUG_TRAP(inet_csk(sk)->icsk_bind_hash == tb);
+WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
 ret = 0;
 fail_unlock:
@@ -260,7 +260,7 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
 }
 newsk = reqsk_queue_get_child(&icsk->icsk_accept_queue, sk);
-BUG_TRAP(newsk->sk_state != TCP_SYN_RECV);
+WARN_ON(newsk->sk_state == TCP_SYN_RECV);
 out:
 release_sock(sk);
 return newsk;
@@ -386,7 +386,7 @@ struct request_sock *inet_csk_search_req(const struct sock *sk,
 ireq->rmt_addr == raddr &&
 ireq->loc_addr == laddr &&
 AF_INET_FAMILY(req->rsk_ops->family)) {
-BUG_TRAP(!req->sk);
+WARN_ON(req->sk);
 *prevp = prev;
 break;
 }
@@ -539,14 +539,14 @@ EXPORT_SYMBOL_GPL(inet_csk_clone);
 */
 void inet_csk_destroy_sock(struct sock *sk)
 {
-BUG_TRAP(sk->sk_state == TCP_CLOSE);
-BUG_TRAP(sock_flag(sk, SOCK_DEAD));
+WARN_ON(sk->sk_state != TCP_CLOSE);
+WARN_ON(!sock_flag(sk, SOCK_DEAD));
 /* It cannot be in hash table! */
-BUG_TRAP(sk_unhashed(sk));
+WARN_ON(!sk_unhashed(sk));
 /* If it has not 0 inet_sk(sk)->num, it must be bound */
-BUG_TRAP(!inet_sk(sk)->num || inet_csk(sk)->icsk_bind_hash);
+WARN_ON(inet_sk(sk)->num && !inet_csk(sk)->icsk_bind_hash);
 sk->sk_prot->destroy(sk);
@@ -629,7 +629,7 @@ void inet_csk_listen_stop(struct sock *sk)
 local_bh_disable();
 bh_lock_sock(child);
-BUG_TRAP(!sock_owned_by_user(child));
+WARN_ON(sock_owned_by_user(child));
 sock_hold(child);
 sk->sk_prot->disconnect(child, O_NONBLOCK);
@@ -647,7 +647,7 @@ void inet_csk_listen_stop(struct sock *sk)
 sk_acceptq_removed(sk);
 __reqsk_free(req);
 }
-BUG_TRAP(!sk->sk_ack_backlog);
+WARN_ON(sk->sk_ack_backlog);
 }
 EXPORT_SYMBOL_GPL(inet_csk_listen_stop);

@@ -134,8 +134,8 @@ void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f,
 struct sk_buff *fp;
 struct netns_frags *nf;
-BUG_TRAP(q->last_in & INET_FRAG_COMPLETE);
-BUG_TRAP(del_timer(&q->timer) == 0);
+WARN_ON(!(q->last_in & INET_FRAG_COMPLETE));
+WARN_ON(del_timer(&q->timer) != 0);
 /* Release all fragment data. */
 fp = q->fragments;

@@ -305,7 +305,7 @@ unique:
 inet->num = lport;
 inet->sport = htons(lport);
 sk->sk_hash = hash;
-BUG_TRAP(sk_unhashed(sk));
+WARN_ON(!sk_unhashed(sk));
 __sk_add_node(sk, &head->chain);
 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
 write_unlock(lock);
@@ -342,7 +342,7 @@ void __inet_hash_nolisten(struct sock *sk)
 rwlock_t *lock;
 struct inet_ehash_bucket *head;
-BUG_TRAP(sk_unhashed(sk));
+WARN_ON(!sk_unhashed(sk));
 sk->sk_hash = inet_sk_ehashfn(sk);
 head = inet_ehash_bucket(hashinfo, sk->sk_hash);
@@ -367,7 +367,7 @@ static void __inet_hash(struct sock *sk)
 return;
 }
-BUG_TRAP(sk_unhashed(sk));
+WARN_ON(!sk_unhashed(sk));
 list = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
 lock = &hashinfo->lhash_lock;
@@ -450,7 +450,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
 */
 inet_bind_bucket_for_each(tb, node, &head->chain) {
 if (tb->ib_net == net && tb->port == port) {
-BUG_TRAP(!hlist_empty(&tb->owners));
+WARN_ON(hlist_empty(&tb->owners));
 if (tb->fastreuse >= 0)
 goto next_port;
 if (!check_established(death_row, sk,

@@ -86,7 +86,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
 hashinfo->bhash_size)];
 spin_lock(&bhead->lock);
 tw->tw_tb = icsk->icsk_bind_hash;
-BUG_TRAP(icsk->icsk_bind_hash);
+WARN_ON(!icsk->icsk_bind_hash);
 inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
 spin_unlock(&bhead->lock);

@@ -488,8 +488,8 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
 qp->q.fragments = head;
 }
-BUG_TRAP(head != NULL);
-BUG_TRAP(FRAG_CB(head)->offset == 0);
+WARN_ON(head == NULL);
+WARN_ON(FRAG_CB(head)->offset != 0);
 /* Allocate a new buffer for the datagram. */
 ihlen = ip_hdrlen(head);

@@ -118,7 +118,7 @@ static int ip_dev_loopback_xmit(struct sk_buff *newskb)
 __skb_pull(newskb, skb_network_offset(newskb));
 newskb->pkt_type = PACKET_LOOPBACK;
 newskb->ip_summed = CHECKSUM_UNNECESSARY;
-BUG_TRAP(newskb->dst);
+WARN_ON(!newskb->dst);
 netif_rx(newskb);
 return 0;
 }

@@ -1096,7 +1096,7 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
 #if TCP_DEBUG
 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
-BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
+WARN_ON(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
 #endif
 if (inet_csk_ack_scheduled(sk)) {
@@ -1358,7 +1358,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 goto found_ok_skb;
 if (tcp_hdr(skb)->fin)
 goto found_fin_ok;
-BUG_TRAP(flags & MSG_PEEK);
+WARN_ON(!(flags & MSG_PEEK));
 skb = skb->next;
 } while (skb != (struct sk_buff *)&sk->sk_receive_queue);
@@ -1421,8 +1421,8 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 tp->ucopy.len = len;
-BUG_TRAP(tp->copied_seq == tp->rcv_nxt ||
-(flags & (MSG_PEEK | MSG_TRUNC)));
+WARN_ON(tp->copied_seq != tp->rcv_nxt &&
+!(flags & (MSG_PEEK | MSG_TRUNC)));
 /* Ugly... If prequeue is not empty, we have to
 * process it before releasing socket, otherwise
@@ -1844,7 +1844,7 @@ adjudge_to_death:
 */
 local_bh_disable();
 bh_lock_sock(sk);
-BUG_TRAP(!sock_owned_by_user(sk));
+WARN_ON(sock_owned_by_user(sk));
 /* Have we already been destroyed by a softirq or backlog? */
 if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
@@ -1973,7 +1973,7 @@ int tcp_disconnect(struct sock *sk, int flags)
 memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
 __sk_dst_reset(sk);
-BUG_TRAP(!inet->num || icsk->icsk_bind_hash);
+WARN_ON(inet->num && !icsk->icsk_bind_hash);
 sk->sk_error_report(sk);
 return err;

@@ -1629,10 +1629,10 @@ advance_sp:
 out:
 #if FASTRETRANS_DEBUG > 0
-BUG_TRAP((int)tp->sacked_out >= 0);
-BUG_TRAP((int)tp->lost_out >= 0);
-BUG_TRAP((int)tp->retrans_out >= 0);
-BUG_TRAP((int)tcp_packets_in_flight(tp) >= 0);
+WARN_ON((int)tp->sacked_out < 0);
+WARN_ON((int)tp->lost_out < 0);
+WARN_ON((int)tp->retrans_out < 0);
+WARN_ON((int)tcp_packets_in_flight(tp) < 0);
 #endif
 return flag;
 }
@@ -2181,7 +2181,7 @@ static void tcp_mark_head_lost(struct sock *sk, int packets)
 int err;
 unsigned int mss;
-BUG_TRAP(packets <= tp->packets_out);
+WARN_ON(packets > tp->packets_out);
 if (tp->lost_skb_hint) {
 skb = tp->lost_skb_hint;
 cnt = tp->lost_cnt_hint;
@@ -2610,7 +2610,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
 /* E. Check state exit conditions. State can be terminated
 * when high_seq is ACKed. */
 if (icsk->icsk_ca_state == TCP_CA_Open) {
-BUG_TRAP(tp->retrans_out == 0);
+WARN_ON(tp->retrans_out != 0);
 tp->retrans_stamp = 0;
 } else if (!before(tp->snd_una, tp->high_seq)) {
 switch (icsk->icsk_ca_state) {
@@ -2972,9 +2972,9 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets)
 }
 #if FASTRETRANS_DEBUG > 0
-BUG_TRAP((int)tp->sacked_out >= 0);
-BUG_TRAP((int)tp->lost_out >= 0);
-BUG_TRAP((int)tp->retrans_out >= 0);
+WARN_ON((int)tp->sacked_out < 0);
+WARN_ON((int)tp->lost_out < 0);
+WARN_ON((int)tp->retrans_out < 0);
 if (!tp->packets_out && tcp_is_sack(tp)) {
 icsk = inet_csk(sk);
 if (tp->lost_out) {
@@ -3877,7 +3877,7 @@ static void tcp_sack_remove(struct tcp_sock *tp)
 int i;
 /* RCV.NXT must cover all the block! */
-BUG_TRAP(!before(tp->rcv_nxt, sp->end_seq));
+WARN_ON(before(tp->rcv_nxt, sp->end_seq));
 /* Zap this SACK, by moving forward any other SACKS. */
 for (i=this_sack+1; i < num_sacks; i++)

@@ -418,7 +418,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
 /* ICMPs are not backlogged, hence we cannot get
 an established socket here.
 */
-BUG_TRAP(!req->sk);
+WARN_ON(req->sk);
 if (seq != tcp_rsk(req)->snt_isn) {
 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);

@@ -287,7 +287,7 @@ static void tcp_retransmit_timer(struct sock *sk)
 if (!tp->packets_out)
 goto out;
-BUG_TRAP(!tcp_write_queue_empty(sk));
+WARN_ON(tcp_write_queue_empty(sk));
 if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
 !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {

@@ -313,8 +313,10 @@ static void in6_dev_finish_destroy_rcu(struct rcu_head *head)
 void in6_dev_finish_destroy(struct inet6_dev *idev)
 {
 struct net_device *dev = idev->dev;
-BUG_TRAP(idev->addr_list==NULL);
-BUG_TRAP(idev->mc_list==NULL);
+WARN_ON(idev->addr_list != NULL);
+WARN_ON(idev->mc_list != NULL);
 #ifdef NET_REFCNT_DEBUG
 printk(KERN_DEBUG "in6_dev_finish_destroy: %s\n", dev ? dev->name : "NIL");
 #endif
@@ -517,8 +519,9 @@ static void addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old)
 void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp)
 {
-BUG_TRAP(ifp->if_next==NULL);
-BUG_TRAP(ifp->lst_next==NULL);
+WARN_ON(ifp->if_next != NULL);
+WARN_ON(ifp->lst_next != NULL);
 #ifdef NET_REFCNT_DEBUG
 printk(KERN_DEBUG "inet6_ifa_finish_destroy\n");
 #endif

@@ -153,7 +153,7 @@ lookup_protocol:
 answer_flags = answer->flags;
 rcu_read_unlock();
-BUG_TRAP(answer_prot->slab != NULL);
+WARN_ON(answer_prot->slab == NULL);
 err = -ENOBUFS;
 sk = sk_alloc(net, PF_INET6, GFP_KERNEL, answer_prot);

@@ -98,7 +98,7 @@ struct request_sock *inet6_csk_search_req(const struct sock *sk,
 ipv6_addr_equal(&treq->rmt_addr, raddr) &&
 ipv6_addr_equal(&treq->loc_addr, laddr) &&
 (!treq->iif || treq->iif == iif)) {
-BUG_TRAP(req->sk == NULL);
+WARN_ON(req->sk != NULL);
 *prevp = prev;
 return req;
 }

@@ -28,7 +28,7 @@ void __inet6_hash(struct sock *sk)
 struct hlist_head *list;
 rwlock_t *lock;
-BUG_TRAP(sk_unhashed(sk));
+WARN_ON(!sk_unhashed(sk));
 if (sk->sk_state == TCP_LISTEN) {
 list = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
@@ -202,7 +202,7 @@ unique:
 * in hash table socket with a funny identity. */
 inet->num = lport;
 inet->sport = htons(lport);
-BUG_TRAP(sk_unhashed(sk));
+WARN_ON(!sk_unhashed(sk));
 __sk_add_node(sk, &head->chain);
 sk->sk_hash = hash;
 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

@@ -287,7 +287,7 @@ static int fib6_dump_node(struct fib6_walker_t *w)
 w->leaf = rt;
 return 1;
 }
-BUG_TRAP(res!=0);
+WARN_ON(res == 0);
 }
 w->leaf = NULL;
 return 0;
@@ -778,7 +778,7 @@ out:
 pn->leaf = fib6_find_prefix(info->nl_net, pn);
 #if RT6_DEBUG >= 2
 if (!pn->leaf) {
-BUG_TRAP(pn->leaf != NULL);
+WARN_ON(pn->leaf == NULL);
 pn->leaf = info->nl_net->ipv6.ip6_null_entry;
 }
 #endif
@@ -942,7 +942,7 @@ struct fib6_node * fib6_locate(struct fib6_node *root,
 #ifdef CONFIG_IPV6_SUBTREES
 if (src_len) {
-BUG_TRAP(saddr!=NULL);
+WARN_ON(saddr == NULL);
 if (fn && fn->subtree)
 fn = fib6_locate_1(fn->subtree, saddr, src_len,
 offsetof(struct rt6_info, rt6i_src));
@@ -996,9 +996,9 @@ static struct fib6_node *fib6_repair_tree(struct net *net,
 RT6_TRACE("fixing tree: plen=%d iter=%d\n", fn->fn_bit, iter);
 iter++;
-BUG_TRAP(!(fn->fn_flags&RTN_RTINFO));
-BUG_TRAP(!(fn->fn_flags&RTN_TL_ROOT));
-BUG_TRAP(fn->leaf==NULL);
+WARN_ON(fn->fn_flags & RTN_RTINFO);
+WARN_ON(fn->fn_flags & RTN_TL_ROOT);
+WARN_ON(fn->leaf != NULL);
 children = 0;
 child = NULL;
@@ -1014,7 +1014,7 @@ static struct fib6_node *fib6_repair_tree(struct net *net,
 fn->leaf = fib6_find_prefix(net, fn);
 #if RT6_DEBUG >= 2
 if (fn->leaf==NULL) {
-BUG_TRAP(fn->leaf);
+WARN_ON(!fn->leaf);
 fn->leaf = net->ipv6.ip6_null_entry;
 }
 #endif
@@ -1025,16 +1025,17 @@ static struct fib6_node *fib6_repair_tree(struct net *net,
 pn = fn->parent;
 #ifdef CONFIG_IPV6_SUBTREES
 if (FIB6_SUBTREE(pn) == fn) {
-BUG_TRAP(fn->fn_flags&RTN_ROOT);
+WARN_ON(!(fn->fn_flags & RTN_ROOT));
 FIB6_SUBTREE(pn) = NULL;
 nstate = FWS_L;
 } else {
-BUG_TRAP(!(fn->fn_flags&RTN_ROOT));
+WARN_ON(fn->fn_flags & RTN_ROOT);
 #endif
 if (pn->right == fn) pn->right = child;
 else if (pn->left == fn) pn->left = child;
 #if RT6_DEBUG >= 2
-else BUG_TRAP(0);
+else
+WARN_ON(1);
 #endif
 if (child)
 child->parent = pn;
@@ -1154,14 +1155,14 @@ int fib6_del(struct rt6_info *rt, struct nl_info *info)
 #if RT6_DEBUG >= 2
 if (rt->u.dst.obsolete>0) {
-BUG_TRAP(fn==NULL);
+WARN_ON(fn != NULL);
 return -ENOENT;
 }
 #endif
 if (fn == NULL || rt == net->ipv6.ip6_null_entry)
 return -ENOENT;
-BUG_TRAP(fn->fn_flags&RTN_RTINFO);
+WARN_ON(!(fn->fn_flags & RTN_RTINFO));
 if (!(rt->rt6i_flags&RTF_CACHE)) {
 struct fib6_node *pn = fn;
@@ -1266,7 +1267,7 @@ static int fib6_walk_continue(struct fib6_walker_t *w)
 w->node = pn;
 #ifdef CONFIG_IPV6_SUBTREES
 if (FIB6_SUBTREE(pn) == fn) {
-BUG_TRAP(fn->fn_flags&RTN_ROOT);
+WARN_ON(!(fn->fn_flags & RTN_ROOT));
 w->state = FWS_L;
 continue;
 }
@@ -1281,7 +1282,7 @@ static int fib6_walk_continue(struct fib6_walker_t *w)
 continue;
 }
 #if RT6_DEBUG >= 2
-BUG_TRAP(0);
+WARN_ON(1);
 #endif
 }
 }
@@ -1323,7 +1324,7 @@ static int fib6_clean_node(struct fib6_walker_t *w)
 }
 return 0;
 }
-BUG_TRAP(res==0);
+WARN_ON(res != 0);
 }
 w->leaf = rt;
 return 0;

@@ -116,7 +116,7 @@ static int ip6_dev_loopback_xmit(struct sk_buff *newskb)
 __skb_pull(newskb, skb_network_offset(newskb));
 newskb->pkt_type = PACKET_LOOPBACK;
 newskb->ip_summed = CHECKSUM_UNNECESSARY;
-BUG_TRAP(newskb->dst);
+WARN_ON(!newskb->dst);
 netif_rx(newskb);
 return 0;

@@ -164,8 +164,8 @@ static int mip6_destopt_output(struct xfrm_state *x, struct sk_buff *skb)
 calc_padlen(sizeof(*dstopt), 6));
 hao->type = IPV6_TLV_HAO;
+BUILD_BUG_ON(sizeof(*hao) != 18);
 hao->length = sizeof(*hao) - 2;
-BUG_TRAP(hao->length == 16);
 len = ((char *)hao - (char *)dstopt) + sizeof(*hao);
@@ -174,7 +174,7 @@ static int mip6_destopt_output(struct xfrm_state *x, struct sk_buff *skb)
 memcpy(&iph->saddr, x->coaddr, sizeof(iph->saddr));
 spin_unlock_bh(&x->lock);
-BUG_TRAP(len == x->props.header_len);
+WARN_ON(len != x->props.header_len);
 dstopt->hdrlen = (x->props.header_len >> 3) - 1;
 return 0;
@@ -317,7 +317,7 @@ static int mip6_destopt_init_state(struct xfrm_state *x)
 x->props.header_len = sizeof(struct ipv6_destopt_hdr) +
 calc_padlen(sizeof(struct ipv6_destopt_hdr), 6) +
 sizeof(struct ipv6_destopt_hao);
-BUG_TRAP(x->props.header_len == 24);
+WARN_ON(x->props.header_len != 24);
 return 0;
 }
@@ -380,7 +380,7 @@ static int mip6_rthdr_output(struct xfrm_state *x, struct sk_buff *skb)
 rt2->rt_hdr.segments_left = 1;
 memset(&rt2->reserved, 0, sizeof(rt2->reserved));
-BUG_TRAP(rt2->rt_hdr.hdrlen == 2);
+WARN_ON(rt2->rt_hdr.hdrlen != 2);
 memcpy(&rt2->addr, &iph->daddr, sizeof(rt2->addr));
 spin_lock_bh(&x->lock);

@@ -416,8 +416,8 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
 fq_kill(fq);
-BUG_TRAP(head != NULL);
-BUG_TRAP(NFCT_FRAG6_CB(head)->offset == 0);
+WARN_ON(head == NULL);
+WARN_ON(NFCT_FRAG6_CB(head)->offset != 0);
 /* Unfragmented part is taken from the first segment. */
 payload_len = ((head->data - skb_network_header(head)) -

@@ -473,8 +473,8 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
 fq->q.fragments = head;
 }
-BUG_TRAP(head != NULL);
-BUG_TRAP(FRAG6_CB(head)->offset == 0);
+WARN_ON(head == NULL);
+WARN_ON(FRAG6_CB(head)->offset != 0);
 /* Unfragmented part is taken from the first segment. */
 payload_len = ((head->data - skb_network_header(head)) -

@@ -421,7 +421,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 /* ICMPs are not backlogged, hence we cannot get
 * an established socket here.
 */
-BUG_TRAP(req->sk == NULL);
+WARN_ON(req->sk != NULL);
 if (seq != tcp_rsk(req)->snt_isn) {
 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);

@@ -96,8 +96,8 @@ static void pfkey_sock_destruct(struct sock *sk)
 return;
 }
-BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
-BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
+WARN_ON(atomic_read(&sk->sk_rmem_alloc));
+WARN_ON(atomic_read(&sk->sk_wmem_alloc));
 atomic_dec(&pfkey_socks_nr);
 }

@@ -158,9 +158,10 @@ static void netlink_sock_destruct(struct sock *sk)
 printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
 return;
 }
-BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
-BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
-BUG_TRAP(!nlk_sk(sk)->groups);
+WARN_ON(atomic_read(&sk->sk_rmem_alloc));
+WARN_ON(atomic_read(&sk->sk_wmem_alloc));
+WARN_ON(nlk_sk(sk)->groups);
 }
 /* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on

@@ -260,8 +260,8 @@ static inline struct packet_sock *pkt_sk(struct sock *sk)
 static void packet_sock_destruct(struct sock *sk)
 {
-BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
-BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
+WARN_ON(atomic_read(&sk->sk_rmem_alloc));
+WARN_ON(atomic_read(&sk->sk_wmem_alloc));
 if (!sock_flag(sk, SOCK_DEAD)) {
 printk("Attempt to release alive packet socket: %p\n", sk);

@@ -660,9 +660,9 @@ static void rxrpc_sock_destructor(struct sock *sk)
 rxrpc_purge_queue(&sk->sk_receive_queue);
-BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
-BUG_TRAP(sk_unhashed(sk));
-BUG_TRAP(!sk->sk_socket);
+WARN_ON(atomic_read(&sk->sk_wmem_alloc));
+WARN_ON(!sk_unhashed(sk));
+WARN_ON(sk->sk_socket);
 if (!sock_flag(sk, SOCK_DEAD)) {
 printk("Attempt to release alive rxrpc socket: %p\n", sk);

@@ -41,7 +41,7 @@ void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo)
 return;
 }
 }
-BUG_TRAP(0);
+WARN_ON(1);
 }
 EXPORT_SYMBOL(tcf_hash_destroy);

@@ -116,7 +116,7 @@ static void tcf_police_destroy(struct tcf_police *p)
 return;
 }
 }
-BUG_TRAP(0);
+WARN_ON(1);
 }
 static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = {

@@ -345,7 +345,7 @@ static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode* key)
 }
 }
 }
-BUG_TRAP(0);
+WARN_ON(1);
 return 0;
 }
@@ -368,7 +368,7 @@ static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
 struct tc_u_common *tp_c = tp->data;
 struct tc_u_hnode **hn;
-BUG_TRAP(!ht->refcnt);
+WARN_ON(ht->refcnt);
 u32_clear_hnode(tp, ht);
@@ -380,7 +380,7 @@ static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
 }
 }
-BUG_TRAP(0);
+WARN_ON(1);
 return -ENOENT;
 }
@@ -389,7 +389,7 @@ static void u32_destroy(struct tcf_proto *tp)
 struct tc_u_common *tp_c = tp->data;
 struct tc_u_hnode *root_ht = xchg(&tp->root, NULL);
-BUG_TRAP(root_ht != NULL);
+WARN_ON(root_ht == NULL);
 if (root_ht && --root_ht->refcnt == 0)
 u32_destroy_hnode(tp, root_ht);
@@ -407,7 +407,7 @@ static void u32_destroy(struct tcf_proto *tp)
 while ((ht = tp_c->hlist) != NULL) {
 tp_c->hlist = ht->next;
-BUG_TRAP(ht->refcnt == 0);
+WARN_ON(ht->refcnt != 0);
 kfree(ht);
 }

@@ -1175,7 +1175,7 @@ static void cbq_unlink_class(struct cbq_class *this)
 this->tparent->children = NULL;
 }
 } else {
-BUG_TRAP(this->sibling == this);
+WARN_ON(this->sibling != this);
 }
 }
@@ -1699,7 +1699,7 @@ static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
 {
 struct cbq_sched_data *q = qdisc_priv(sch);
-BUG_TRAP(!cl->filters);
+WARN_ON(cl->filters);
 tcf_destroy_chain(&cl->filter_list);
 qdisc_destroy(cl->q);

@@ -746,5 +746,5 @@ void dev_shutdown(struct net_device *dev)
 {
 netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
 shutdown_scheduler_queue(dev, &dev->rx_queue, NULL);
-BUG_TRAP(!timer_pending(&dev->watchdog_timer));
+WARN_ON(timer_pending(&dev->watchdog_timer));
 }

@@ -524,7 +524,7 @@ htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, long *diff)
 */
 static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
 {
-BUG_TRAP(!cl->level && cl->un.leaf.q && cl->un.leaf.q->q.qlen);
+WARN_ON(cl->level || !cl->un.leaf.q || !cl->un.leaf.q->q.qlen);
 if (!cl->prio_activity) {
 cl->prio_activity = 1 << (cl->un.leaf.aprio = cl->un.leaf.prio);
@@ -542,7 +542,7 @@ static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
 */
 static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
 {
-BUG_TRAP(cl->prio_activity);
+WARN_ON(!cl->prio_activity);
 htb_deactivate_prios(q, cl);
 cl->prio_activity = 0;
@@ -757,7 +757,7 @@ static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
 u32 *pid;
 } stk[TC_HTB_MAXDEPTH], *sp = stk;
-BUG_TRAP(tree->rb_node);
+WARN_ON(!tree->rb_node);
 sp->root = tree->rb_node;
 sp->pptr = pptr;
 sp->pid = pid;
@@ -777,7 +777,7 @@ static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
 *sp->pptr = (*sp->pptr)->rb_left;
 if (sp > stk) {
 sp--;
-BUG_TRAP(*sp->pptr);
+WARN_ON(!*sp->pptr);
 if (!*sp->pptr)
 return NULL;
 htb_next_rb_node(sp->pptr);
@@ -792,7 +792,7 @@ static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
 sp->pid = cl->un.inner.last_ptr_id + prio;
 }
 }
-BUG_TRAP(0);
+WARN_ON(1);
 return NULL;
 }
@@ -810,7 +810,7 @@ static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio,
 do {
 next:
-BUG_TRAP(cl);
+WARN_ON(!cl);
 if (!cl)
 return NULL;
@@ -1185,7 +1185,7 @@ static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
 {
 struct htb_class *parent = cl->parent;
-BUG_TRAP(!cl->level && cl->un.leaf.q && !cl->prio_activity);
+WARN_ON(cl->level || !cl->un.leaf.q || cl->prio_activity);
 if (parent->cmode != HTB_CAN_SEND)
 htb_safe_rb_erase(&parent->pq_node, q->wait_pq + parent->level);
@@ -1205,7 +1205,7 @@ static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
 static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
 {
 if (!cl->level) {
-BUG_TRAP(cl->un.leaf.q);
+WARN_ON(!cl->un.leaf.q);
 qdisc_destroy(cl->un.leaf.q);
 }
 gen_kill_estimator(&cl->bstats, &cl->rate_est);

@@ -464,7 +464,7 @@ static void sctp_association_destroy(struct sctp_association *asoc)
 spin_unlock_bh(&sctp_assocs_id_lock);
 }
-BUG_TRAP(!atomic_read(&asoc->rmem_alloc));
+WARN_ON(atomic_read(&asoc->rmem_alloc));
 if (asoc->base.malloced) {
 kfree(asoc);

@@ -227,7 +227,7 @@ static void __unix_remove_socket(struct sock *sk)
 static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
 {
-BUG_TRAP(sk_unhashed(sk));
+WARN_ON(!sk_unhashed(sk));
 sk_add_node(sk, list);
 }
@@ -350,9 +350,9 @@ static void unix_sock_destructor(struct sock *sk)
 skb_queue_purge(&sk->sk_receive_queue);
-BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
-BUG_TRAP(sk_unhashed(sk));
-BUG_TRAP(!sk->sk_socket);
+WARN_ON(atomic_read(&sk->sk_wmem_alloc));
+WARN_ON(!sk_unhashed(sk));
+WARN_ON(sk->sk_socket);
 if (!sock_flag(sk, SOCK_DEAD)) {
 printk("Attempt to release alive unix socket: %p\n", sk);
 return;

@@ -718,7 +718,7 @@ int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 int end;
-BUG_TRAP(start <= offset + len);
+WARN_ON(start > offset + len);
 end = start + skb_shinfo(skb)->frags[i].size;
 if ((copy = end - offset) > 0) {
@@ -748,7 +748,7 @@ int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
 for (; list; list = list->next) {
 int end;
-BUG_TRAP(start <= offset + len);
+WARN_ON(start > offset + len);
 end = start + list->len;
 if ((copy = end - offset) > 0) {

@@ -22,7 +22,6 @@
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/percpu.h>
-#include <linux/rtnetlink.h>
 #include <linux/smp.h>
 #include <linux/vmalloc.h>
 #include <net/ip.h>
@@ -251,7 +250,7 @@ static void ipcomp_free_tfms(struct crypto_comp **tfms)
 break;
 }
-BUG_TRAP(pos);
+WARN_ON(!pos);
 if (--pos->users)
 return;

@@ -538,7 +538,7 @@ EXPORT_SYMBOL(xfrm_state_alloc);
 void __xfrm_state_destroy(struct xfrm_state *x)
 {
-BUG_TRAP(x->km.state == XFRM_STATE_DEAD);
+WARN_ON(x->km.state != XFRM_STATE_DEAD);
 spin_lock_bh(&xfrm_state_lock);
 list_del(&x->all);