[NET]: Transform skb_queue_len() binary tests into skb_queue_empty()

This is part of the grand scheme to eliminate the qlen
member of struct sk_buff_head, and subsequently remove the
'list' member of struct sk_buff.

Most users of skb_queue_len() only want to know whether the
queue is empty, and that is trivially done with skb_queue_empty(),
which doesn't read the sk_buff_head->qlen member and instead
tests the list itself for emptiness.
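
For context, a minimal sketch of the two helpers and the struct they
operate on, abbreviated from the include/linux/skbuff.h of this era
(unrelated fields omitted; see the tree for the authoritative text):

/* Abbreviated from include/linux/skbuff.h; only relevant fields shown. */
struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;	/* the counter this series wants to eliminate */
	spinlock_t	lock;
};

/* Counter-based query: forces every queue operation to maintain qlen. */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}

/* List-based test: an empty queue's head points back at itself,
 * so qlen is never consulted. */
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
	return list->next == (struct sk_buff *)list;
}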

Signed-off-by: David S. Miller <davem@davemloft.net>
Author: David S. Miller <davem@davemloft.net>
Date:   2005-07-08 14:57:23 -07:00
commit b03efcfb21
parent a92b7b8057

34 changed files with 84 additions and 89 deletions
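
The conversion applied in the hunks below is mechanical; in sketch form
(the queue q stands in for whichever sk_buff_head a call site tests):

/* Non-emptiness tests ... */
if (skb_queue_len(&q))
/* ... become: */
if (!skb_queue_empty(&q))

/* Emptiness tests, whether written as == 0 or with ! ... */
if (skb_queue_len(&q) == 0)
if (!skb_queue_len(&q))
/* ... become: */
if (skb_queue_empty(&q))

Call sites that fed skb_queue_len() into NET_ADD_STATS_*() as an
increment count are switched to the one-shot NET_INC_STATS_*()
variants, since the length is no longer available once qlen goes away.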

--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c

@@ -1105,7 +1105,7 @@ static void tcp_prequeue_process(struct sock *sk)
 	struct sk_buff *skb;
 	struct tcp_sock *tp = tcp_sk(sk);
-	NET_ADD_STATS_USER(LINUX_MIB_TCPPREQUEUED, skb_queue_len(&tp->ucopy.prequeue));
+	NET_INC_STATS_USER(LINUX_MIB_TCPPREQUEUED);
 	/* RX process wants to run with disabled BHs, though it is not
 	 * necessary */
@@ -1369,7 +1369,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 	 * is not empty. It is more elegant, but eats cycles,
 	 * unfortunately.
 	 */
-	if (skb_queue_len(&tp->ucopy.prequeue))
+	if (!skb_queue_empty(&tp->ucopy.prequeue))
 		goto do_prequeue;
 	/* __ Set realtime policy in scheduler __ */
@@ -1394,7 +1394,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 	}
 	if (tp->rcv_nxt == tp->copied_seq &&
-	    skb_queue_len(&tp->ucopy.prequeue)) {
+	    !skb_queue_empty(&tp->ucopy.prequeue)) {
 do_prequeue:
 		tcp_prequeue_process(sk);
@@ -1476,7 +1476,7 @@ skip_copy:
 	} while (len > 0);
 	if (user_recv) {
-		if (skb_queue_len(&tp->ucopy.prequeue)) {
+		if (!skb_queue_empty(&tp->ucopy.prequeue)) {
 			int chunk;
 			tp->ucopy.len = copied > 0 ? len : 0;

--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c

@@ -2802,7 +2802,7 @@ static void tcp_sack_remove(struct tcp_sock *tp)
 	int this_sack;
 	/* Empty ofo queue, hence, all the SACKs are eaten. Clear. */
-	if (skb_queue_len(&tp->out_of_order_queue) == 0) {
+	if (skb_queue_empty(&tp->out_of_order_queue)) {
 		tp->rx_opt.num_sacks = 0;
 		tp->rx_opt.eff_sacks = tp->rx_opt.dsack;
 		return;
@@ -2935,13 +2935,13 @@ queue_and_out:
 		if(th->fin)
 			tcp_fin(skb, sk, th);
-		if (skb_queue_len(&tp->out_of_order_queue)) {
+		if (!skb_queue_empty(&tp->out_of_order_queue)) {
 			tcp_ofo_queue(sk);
 			/* RFC2581. 4.2. SHOULD send immediate ACK, when
 			 * gap in queue is filled.
 			 */
-			if (!skb_queue_len(&tp->out_of_order_queue))
+			if (skb_queue_empty(&tp->out_of_order_queue))
 				tp->ack.pingpong = 0;
 		}
@@ -3249,9 +3249,8 @@ static int tcp_prune_queue(struct sock *sk)
 	 * This must not ever occur. */
 	/* First, purge the out_of_order queue. */
-	if (skb_queue_len(&tp->out_of_order_queue)) {
-		NET_ADD_STATS_BH(LINUX_MIB_OFOPRUNED,
-				 skb_queue_len(&tp->out_of_order_queue));
+	if (!skb_queue_empty(&tp->out_of_order_queue)) {
+		NET_INC_STATS_BH(LINUX_MIB_OFOPRUNED);
 		__skb_queue_purge(&tp->out_of_order_queue);
 		/* Reset SACK state. A conforming SACK implementation will

--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c

@@ -231,11 +231,10 @@ static void tcp_delack_timer(unsigned long data)
 	}
 	tp->ack.pending &= ~TCP_ACK_TIMER;
-	if (skb_queue_len(&tp->ucopy.prequeue)) {
+	if (!skb_queue_empty(&tp->ucopy.prequeue)) {
 		struct sk_buff *skb;
-		NET_ADD_STATS_BH(LINUX_MIB_TCPSCHEDULERFAILED,
-				 skb_queue_len(&tp->ucopy.prequeue));
+		NET_INC_STATS_BH(LINUX_MIB_TCPSCHEDULERFAILED);
 		while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
 			sk->sk_backlog_rcv(sk, skb);