mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-10-02 15:18:19 +00:00
net/sched: taprio: split segmentation logic from qdisc_enqueue()
The majority of the taprio_enqueue() function is spent doing TCP segmentation, which doesn't look right to me. Compilers shouldn't have a problem in inlining code no matter how we write it, so move the segmentation logic to a separate function. Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com> Reviewed-by: Kurt Kanzenbach <kurt@linutronix.de> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
fed87cc671
commit
2d5e8071c4
1 changed file with 36 additions and 30 deletions
|
@ -575,28 +575,10 @@ static int taprio_enqueue_one(struct sk_buff *skb, struct Qdisc *sch,
|
|||
return qdisc_enqueue(skb, child, to_free);
|
||||
}
|
||||
|
||||
/* Will not be called in the full offload case, since the TX queues are
|
||||
* attached to the Qdisc created using qdisc_create_dflt()
|
||||
*/
|
||||
static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
|
||||
static int taprio_enqueue_segmented(struct sk_buff *skb, struct Qdisc *sch,
|
||||
struct Qdisc *child,
|
||||
struct sk_buff **to_free)
|
||||
{
|
||||
struct taprio_sched *q = qdisc_priv(sch);
|
||||
struct Qdisc *child;
|
||||
int queue;
|
||||
|
||||
queue = skb_get_queue_mapping(skb);
|
||||
|
||||
child = q->qdiscs[queue];
|
||||
if (unlikely(!child))
|
||||
return qdisc_drop(skb, sch, to_free);
|
||||
|
||||
/* Large packets might not be transmitted when the transmission duration
|
||||
* exceeds any configured interval. Therefore, segment the skb into
|
||||
* smaller chunks. Drivers with full offload are expected to handle
|
||||
* this in hardware.
|
||||
*/
|
||||
if (skb_is_gso(skb)) {
|
||||
unsigned int slen = 0, numsegs = 0, len = qdisc_pkt_len(skb);
|
||||
netdev_features_t features = netif_skb_features(skb);
|
||||
struct sk_buff *segs, *nskb;
|
||||
|
@ -627,6 +609,30 @@ static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
|
|||
return numsegs > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
|
||||
}
|
||||
|
||||
/* Will not be called in the full offload case, since the TX queues are
|
||||
* attached to the Qdisc created using qdisc_create_dflt()
|
||||
*/
|
||||
static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
|
||||
struct sk_buff **to_free)
|
||||
{
|
||||
struct taprio_sched *q = qdisc_priv(sch);
|
||||
struct Qdisc *child;
|
||||
int queue;
|
||||
|
||||
queue = skb_get_queue_mapping(skb);
|
||||
|
||||
child = q->qdiscs[queue];
|
||||
if (unlikely(!child))
|
||||
return qdisc_drop(skb, sch, to_free);
|
||||
|
||||
/* Large packets might not be transmitted when the transmission duration
|
||||
* exceeds any configured interval. Therefore, segment the skb into
|
||||
* smaller chunks. Drivers with full offload are expected to handle
|
||||
* this in hardware.
|
||||
*/
|
||||
if (skb_is_gso(skb))
|
||||
return taprio_enqueue_segmented(skb, sch, child, to_free);
|
||||
|
||||
return taprio_enqueue_one(skb, sch, child, to_free);
|
||||
}
|
||||
|
||||
|
|
Loading…
Reference in a new issue