bpf/xdp: devmap can avoid calling ndo_xdp_flush

The XDP_REDIRECT map type devmap can avoid using ndo_xdp_flush by
instead instructing ndo_xdp_xmit to flush via the XDP_XMIT_FLUSH flag
in the appropriate places.

Note that after this patch it is possible to remove ndo_xdp_flush
completely, as this is the last user of ndo_xdp_flush. That cleanup is
left for later patches, to keep the driver changes separate.
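
For reference, the driver-side contract this relies on looks roughly
like the sketch below: ndo_xdp_xmit rings its TX doorbell only when the
caller sets XDP_XMIT_FLUSH, so no separate ndo_xdp_flush call is needed.
This is a minimal illustration, not code from this patch; the mydrv_*
ring helpers and types are hypothetical placeholders for driver-specific
code.

/* Sketch: honoring XDP_XMIT_FLUSH inside a driver's ndo_xdp_xmit.
 * The mydrv_* helpers below are hypothetical placeholders.
 */
static int mydrv_xdp_xmit(struct net_device *dev, int n,
			  struct xdp_frame **frames, u32 flags)
{
	struct mydrv_tx_ring *ring = mydrv_select_xdp_ring(dev);
	int i, sent = 0;

	if (unlikely(flags & ~XDP_XMIT_FLUSH))
		return -EINVAL;		/* reject unknown flags */

	for (i = 0; i < n; i++) {
		if (mydrv_xmit_frame(ring, frames[i]))
			break;		/* ring full: report partial count */
		sent++;
	}

	/* Kick the hardware doorbell only when asked to flush, e.g. when
	 * devmap passes XDP_XMIT_FLUSH from its flush paths.
	 */
	if (flags & XDP_XMIT_FLUSH)
		mydrv_ring_doorbell(ring);

	return sent;
}

With such a contract, bq_xmit_all() can simply pass XDP_XMIT_FLUSH when
invoked from the flush paths and 0 when it only drains a full bulk queue.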

Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Acked-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
commit c1ece6b245
parent 1e67575a58
Author:    Jesper Dangaard Brouer <brouer@redhat.com>  2018-05-31 11:00:23 +02:00
Committer: Alexei Starovoitov <ast@kernel.org>


--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -217,7 +217,7 @@ void __dev_map_insert_ctx(struct bpf_map *map, u32 bit)
 }
 
 static int bq_xmit_all(struct bpf_dtab_netdev *obj,
-		       struct xdp_bulk_queue *bq)
+		       struct xdp_bulk_queue *bq, u32 flags)
 {
 	struct net_device *dev = obj->dev;
 	int sent = 0, drops = 0, err = 0;
@@ -232,7 +232,7 @@ static int bq_xmit_all(struct bpf_dtab_netdev *obj,
 		prefetch(xdpf);
 	}
 
-	sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, 0);
+	sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, flags);
 	if (sent < 0) {
 		err = sent;
 		sent = 0;
@@ -276,7 +276,6 @@ void __dev_map_flush(struct bpf_map *map)
 	for_each_set_bit(bit, bitmap, map->max_entries) {
 		struct bpf_dtab_netdev *dev = READ_ONCE(dtab->netdev_map[bit]);
 		struct xdp_bulk_queue *bq;
-		struct net_device *netdev;
 
 		/* This is possible if the dev entry is removed by user space
 		 * between xdp redirect and flush op.
@@ -287,10 +286,7 @@ void __dev_map_flush(struct bpf_map *map)
 		__clear_bit(bit, bitmap);
 
 		bq = this_cpu_ptr(dev->bulkq);
-		bq_xmit_all(dev, bq);
-		netdev = dev->dev;
-		if (likely(netdev->netdev_ops->ndo_xdp_flush))
-			netdev->netdev_ops->ndo_xdp_flush(netdev);
+		bq_xmit_all(dev, bq, XDP_XMIT_FLUSH);
 	}
 }
 
@@ -320,7 +316,7 @@ static int bq_enqueue(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf,
 	struct xdp_bulk_queue *bq = this_cpu_ptr(obj->bulkq);
 
 	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
-		bq_xmit_all(obj, bq);
+		bq_xmit_all(obj, bq, 0);
 
 	/* Ingress dev_rx will be the same for all xdp_frame's in
 	 * bulk_queue, because bq stored per-CPU and must be flushed
@@ -359,8 +355,7 @@ static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
 
 static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
 {
-	if (dev->dev->netdev_ops->ndo_xdp_flush) {
-		struct net_device *fl = dev->dev;
+	if (dev->dev->netdev_ops->ndo_xdp_xmit) {
 		struct xdp_bulk_queue *bq;
 		unsigned long *bitmap;
 
@@ -371,9 +366,7 @@ static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
 			__clear_bit(dev->bit, bitmap);
 
 			bq = per_cpu_ptr(dev->bulkq, cpu);
-			bq_xmit_all(dev, bq);
-
-			fl->netdev_ops->ndo_xdp_flush(dev->dev);
+			bq_xmit_all(dev, bq, XDP_XMIT_FLUSH);
 		}
 	}
 }