net: add rb_to_skb() and other rb tree helpers

Generalize private netem_rb_to_skb()

TCP rtx queue will soon be converted to rb-tree,
so we will need skb_rbtree_walk() helpers.
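
For illustration only (not part of this patch), the new helpers let
rb-tree traversal read like the existing skb_queue_walk() loops. A
minimal sketch, with a hypothetical function name, assuming a tree of
skbs rooted at some struct rb_root:

	#include <linux/skbuff.h>

	/* In-order walk of a tree of skbs; skb_rb_next() maps the
	 * end of the tree to NULL, which terminates the loop.
	 */
	static void example_dump_tree(struct rb_root *root)
	{
		struct sk_buff *skb;

		skb_rbtree_walk(skb, root)
			pr_info("skb len=%u\n", skb->len);
	}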

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
(cherry picked from commit 18a4c0eab2)
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Eric Dumazet authored on 2018-09-13 07:58:57 -07:00; committed by Greg Kroah-Hartman
parent 6bf32cda46
commit 37c7cc80b1
4 changed files with 37 additions and 36 deletions

include/linux/skbuff.h

@@ -3169,6 +3169,12 @@ static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
 
+#define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
+#define skb_rb_first(root) rb_to_skb(rb_first(root))
+#define skb_rb_last(root)  rb_to_skb(rb_last(root))
+#define skb_rb_next(skb)   rb_to_skb(rb_next(&(skb)->rbnode))
+#define skb_rb_prev(skb)   rb_to_skb(rb_prev(&(skb)->rbnode))
+
 #define skb_queue_walk(queue, skb) \
 		for (skb = (queue)->next;					\
 		     skb != (struct sk_buff *)(queue);				\
@@ -3183,6 +3189,18 @@ static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
 		for (; skb != (struct sk_buff *)(queue);			\
 		     skb = skb->next)
 
+#define skb_rbtree_walk(skb, root)						\
+		for (skb = skb_rb_first(root); skb != NULL;			\
+		     skb = skb_rb_next(skb))
+
+#define skb_rbtree_walk_from(skb)						\
+		for (; skb != NULL;						\
+		     skb = skb_rb_next(skb))
+
+#define skb_rbtree_walk_from_safe(skb, tmp)					\
+		for (; tmp = skb ? skb_rb_next(skb) : NULL, (skb != NULL);	\
+		     skb = tmp)
+
 #define skb_queue_walk_from_safe(queue, skb, tmp)				\
 		for (tmp = skb->next;						\
 		     skb != (struct sk_buff *)(queue);				\
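
Usage note (an illustrative sketch, not code from this commit): the
_safe variant evaluates skb_rb_next() before the loop body runs, so the
body may unlink and free the current skb. The helper name below is
hypothetical:

	#include <linux/skbuff.h>

	/* Drain a tree of skbs; tmp caches the successor before
	 * skb is erased, keeping the walk valid.
	 */
	static void example_flush_tree(struct rb_root *root)
	{
		struct sk_buff *skb, *tmp;

		skb = skb_rb_first(root);
		skb_rbtree_walk_from_safe(skb, tmp) {
			rb_erase(&skb->rbnode, root);
			kfree_skb(skb);
		}
	}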

net/ipv4/tcp_fastopen.c

@@ -458,17 +458,15 @@ bool tcp_fastopen_active_should_disable(struct sock *sk)
 void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	struct rb_node *p;
-	struct sk_buff *skb;
 	struct dst_entry *dst;
+	struct sk_buff *skb;
 
 	if (!tp->syn_fastopen)
 		return;
 
 	if (!tp->data_segs_in) {
-		p = rb_first(&tp->out_of_order_queue);
-		if (p && !rb_next(p)) {
-			skb = rb_entry(p, struct sk_buff, rbnode);
+		skb = skb_rb_first(&tp->out_of_order_queue);
+		if (skb && !skb_rb_next(skb)) {
 			if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
 				tcp_fastopen_active_disable(sk);
 				return;

net/ipv4/tcp_input.c

@@ -4372,7 +4372,7 @@ static void tcp_ofo_queue(struct sock *sk)
 
 	p = rb_first(&tp->out_of_order_queue);
 	while (p) {
-		skb = rb_entry(p, struct sk_buff, rbnode);
+		skb = rb_to_skb(p);
 		if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
 			break;
@@ -4440,7 +4440,7 @@ static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb,
 static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	struct rb_node **p, *q, *parent;
+	struct rb_node **p, *parent;
 	struct sk_buff *skb1;
 	u32 seq, end_seq;
 	bool fragstolen;
@@ -4503,7 +4503,7 @@ coalesce_done:
 	parent = NULL;
 	while (*p) {
 		parent = *p;
-		skb1 = rb_entry(parent, struct sk_buff, rbnode);
+		skb1 = rb_to_skb(parent);
 		if (before(seq, TCP_SKB_CB(skb1)->seq)) {
 			p = &parent->rb_left;
 			continue;
@@ -4548,9 +4548,7 @@ insert:
 
 merge_right:
 	/* Remove other segments covered by skb. */
-	while ((q = rb_next(&skb->rbnode)) != NULL) {
-		skb1 = rb_entry(q, struct sk_buff, rbnode);
+	while ((skb1 = skb_rb_next(skb)) != NULL) {
 		if (!after(end_seq, TCP_SKB_CB(skb1)->seq))
 			break;
 		if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
@@ -4565,7 +4563,7 @@ merge_right:
 		tcp_drop(sk, skb1);
 	}
 	/* If there is no skb after us, we are the last_skb ! */
-	if (!q)
+	if (!skb1)
 		tp->ooo_last_skb = skb;
 
 add_sack:
@@ -4749,7 +4747,7 @@ static struct sk_buff *tcp_skb_next(struct sk_buff *skb, struct sk_buff_head *list)
 	if (list)
 		return !skb_queue_is_last(list, skb) ? skb->next : NULL;
 
-	return rb_entry_safe(rb_next(&skb->rbnode), struct sk_buff, rbnode);
+	return skb_rb_next(skb);
 }
 
 static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
@@ -4778,7 +4776,7 @@ static void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb)
 
 	while (*p) {
 		parent = *p;
-		skb1 = rb_entry(parent, struct sk_buff, rbnode);
+		skb1 = rb_to_skb(parent);
 		if (before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb1)->seq))
 			p = &parent->rb_left;
 		else
@@ -4898,19 +4896,12 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
 	struct tcp_sock *tp = tcp_sk(sk);
 	u32 range_truesize, sum_tiny = 0;
 	struct sk_buff *skb, *head;
-	struct rb_node *p;
 	u32 start, end;
 
-	p = rb_first(&tp->out_of_order_queue);
-	skb = rb_entry_safe(p, struct sk_buff, rbnode);
+	skb = skb_rb_first(&tp->out_of_order_queue);
 new_range:
 	if (!skb) {
-		p = rb_last(&tp->out_of_order_queue);
-		/* Note: This is possible p is NULL here. We do not
-		 * use rb_entry_safe(), as ooo_last_skb is valid only
-		 * if rbtree is not empty.
-		 */
-		tp->ooo_last_skb = rb_entry(p, struct sk_buff, rbnode);
+		tp->ooo_last_skb = skb_rb_last(&tp->out_of_order_queue);
 		return;
 	}
 	start = TCP_SKB_CB(skb)->seq;
@@ -4918,7 +4909,7 @@ new_range:
 	range_truesize = skb->truesize;
 
 	for (head = skb;;) {
-		skb = tcp_skb_next(skb, NULL);
+		skb = skb_rb_next(skb);
 
 		/* Range is terminated when we see a gap or when
 		 * we are at the queue end.
@@ -4974,7 +4965,7 @@ static bool tcp_prune_ofo_queue(struct sock *sk)
 		prev = rb_prev(node);
 		rb_erase(node, &tp->out_of_order_queue);
 		goal -= rb_to_skb(node)->truesize;
-		tcp_drop(sk, rb_entry(node, struct sk_buff, rbnode));
+		tcp_drop(sk, rb_to_skb(node));
 		if (!prev || goal <= 0) {
 			sk_mem_reclaim(sk);
 			if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
@@ -4984,7 +4975,7 @@ static bool tcp_prune_ofo_queue(struct sock *sk)
 		}
 		node = prev;
 	} while (node);
-	tp->ooo_last_skb = rb_entry(prev, struct sk_buff, rbnode);
+	tp->ooo_last_skb = rb_to_skb(prev);
 
 	/* Reset SACK state. A conforming SACK implementation will
 	 * do the same at a timeout based retransmit. When a connection

net/sched/sch_netem.c

@@ -149,12 +149,6 @@ struct netem_skb_cb {
 	ktime_t		tstamp_save;
 };
 
-static struct sk_buff *netem_rb_to_skb(struct rb_node *rb)
-{
-	return rb_entry(rb, struct sk_buff, rbnode);
-}
-
 static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
 {
 	/* we assume we can use skb next/prev/tstamp as storage for rb_node */
@@ -365,7 +359,7 @@ static void tfifo_reset(struct Qdisc *sch)
 	struct rb_node *p;
 
 	while ((p = rb_first(&q->t_root))) {
-		struct sk_buff *skb = netem_rb_to_skb(p);
+		struct sk_buff *skb = rb_to_skb(p);
 
 		rb_erase(p, &q->t_root);
 		rtnl_kfree_skbs(skb, skb);
@@ -382,7 +376,7 @@ static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
 		struct sk_buff *skb;
 
 		parent = *p;
-		skb = netem_rb_to_skb(parent);
+		skb = rb_to_skb(parent);
 		if (tnext >= netem_skb_cb(skb)->time_to_send)
 			p = &parent->rb_right;
 		else
@@ -538,7 +532,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 			struct sk_buff *t_skb;
 			struct netem_skb_cb *t_last;
 
-			t_skb = netem_rb_to_skb(rb_last(&q->t_root));
+			t_skb = skb_rb_last(&q->t_root);
 			t_last = netem_skb_cb(t_skb);
 			if (!last ||
 			    t_last->time_to_send > last->time_to_send) {
@@ -618,7 +612,7 @@ deliver:
 	if (p) {
 		psched_time_t time_to_send;
 
-		skb = netem_rb_to_skb(p);
+		skb = rb_to_skb(p);
 
 		/* if more time remaining? */
 		time_to_send = netem_skb_cb(skb)->time_to_send;
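
Worth noting (an aside, not from the commit message): the removed
netem_rb_to_skb() wrapped plain rb_entry(), while the shared rb_to_skb()
is built on rb_entry_safe() from include/linux/rbtree.h, which also maps
a NULL rb_node to a NULL skb:

	#define rb_entry_safe(ptr, type, member) \
		({ typeof(ptr) ____ptr = (ptr); \
		   ____ptr ? rb_entry(____ptr, type, member) : NULL; \
		})

The NULL check is what lets skb_rb_first()/skb_rb_next() serve directly
as loop bounds, and it is a superset of the old netem behavior, so the
substitution is safe at every converted call site.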