From: Jason Xing <kernelxing@xxxxxxxxxxx>

Add batch xmit logic: grab the queue lock and disable bottom halves only
once, then send all the aggregated packets in one loop. Since the previous
patch fills xs->skb_cache in reverse order, this patch transmits the skbs
by walking the index from 'start' down to 'end', continuing as long as the
index is not smaller than 'end'.

Signed-off-by: Jason Xing <kernelxing@xxxxxxxxxxx>
---
 include/linux/netdevice.h |  3 +++
 net/core/dev.c            | 19 +++++++++++++++++++
 2 files changed, 22 insertions(+)

diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 5e5de4b0a433..8e2688e3f2e4 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3352,6 +3352,9 @@ u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
 
 int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev);
 int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
+int xsk_direct_xmit_batch(struct sk_buff **skbs, struct net_device *dev,
+			  struct netdev_queue *txq, int *cur,
+			  int start, int end);
 
 static inline int dev_queue_xmit(struct sk_buff *skb)
 {
diff --git a/net/core/dev.c b/net/core/dev.c
index 68dc47d7e700..a5a6b9a199e9 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4742,6 +4742,25 @@ int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
 }
 EXPORT_SYMBOL(__dev_queue_xmit);
 
+int xsk_direct_xmit_batch(struct sk_buff **skbs, struct net_device *dev,
+			  struct netdev_queue *txq, int *cur,
+			  int start, int end)
+{
+	int ret = NETDEV_TX_BUSY;
+
+	local_bh_disable();
+	HARD_TX_LOCK(dev, txq, smp_processor_id());
+	for (*cur = start; *cur >= end; (*cur)--) {
+		ret = netdev_start_xmit(skbs[*cur], dev, txq, false);
+		if (ret != NETDEV_TX_OK)
+			break;
+	}
+	HARD_TX_UNLOCK(dev, txq);
+	local_bh_enable();
+
+	return ret;
+}
+
 int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
 {
 	struct net_device *dev = skb->dev;
-- 
2.41.3
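
For illustration only, a hypothetical caller sketch (not part of this patch)
showing how an AF_XDP send path might drain the reversed xs->skb_cache with
the new helper. The function name xsk_flush_skb_cache, the nr_cached
parameter, and the assumption that skb_cache is an array of skb pointers
are mine, not taken from the series:

static int xsk_flush_skb_cache(struct xdp_sock *xs, struct net_device *dev,
			       struct netdev_queue *txq, int nr_cached)
{
	int cur, err;

	/* skb_cache was filled back to front by the previous patch, so the
	 * batch helper walks from nr_cached - 1 down to 0 under a single
	 * lock/BH-disable section.
	 */
	err = xsk_direct_xmit_batch(xs->skb_cache, dev, txq, &cur,
				    nr_cached - 1, 0);

	/* On failure (e.g. NETDEV_TX_BUSY), 'cur' is left pointing at the
	 * skb the driver did not accept, so the caller knows where to
	 * resume or what to requeue/drop.
	 */
	return err;
}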