commit	fd129f8941cf2309def29b5c8a23b62faff0c9d0
author	David S. Miller <davem@davemloft.net>	2018-06-05 19:42:19 +0300
committer	David S. Miller <davem@davemloft.net>	2018-06-05 19:42:19 +0300
tree	6ad8afbb59eaf14cfa9f0c4bad498254e6ff1e66
parent	a6fa9087fc280bba8a045d11d9b5d86cbf9a3a83
parent	9fa06104a235f64d6a2bf3012cc9966e8e4be5eb
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Daniel Borkmann says:

====================
pull-request: bpf-next 2018-06-05

The following pull-request contains BPF updates for your *net-next* tree.

The main changes are:

1) Add a new BPF hook for sendmsg similar to existing hooks for bind and
   connect: "This allows to override source IP (including the case when it's
   set via cmsg(3)) and destination IP:port for unconnected UDP (slow path).
   TCP and connected UDP (fast path) are not affected. This makes UDP support
   complete, that is, connected UDP is handled by connect hooks, unconnected
   by sendmsg ones.", from Andrey.

2) Rework of the AF_XDP API to allow extending it in the future for a
   type-writer model if necessary. In this mode a memory window is passed to
   hardware and multiple frames might be filled into that window instead of
   just one, as is the case in the current fixed frame-size model. With the
   new changes made, this can be supported without having to add a new
   descriptor format. Also, core bits for the zero-copy support for AF_XDP
   have been merged as agreed upon, where i40e bits will be routed via Jeff
   later on. Various improvements to documentation and sample programs are
   included as well, all from Björn and Magnus.

3) Given BPF's flexibility, a new program type has been added to implement
   infrared decoders. Quote: "The kernel IR decoders support the most widely
   used IR protocols, but there are many protocols which are not supported.
   [...] There is a 'long tail' of unsupported IR protocols, for which lircd
   is needed to decode the IR. IR encoding is done in such a way that some
   simple circuit can decode it; therefore, BPF is ideal. [...] user-space
   can define a decoder in BPF, attach it to the rc device through the lirc
   chardev.", from Sean.

4) Several improvements and fixes to BPF core: among others, dumping map and
   prog IDs into fdinfo, which is a straightforward way to correlate BPF
   objects used by applications; removing an indirect call, and therefore a
   retpoline, in all map lookup/update/delete calls by invoking the callback
   directly for 64-bit archs; and adding a new bpf_skb_cgroup_id() BPF helper
   for tc BPF programs to have an efficient way of looking up the cgroup v2
   id for policy or other use cases. Also fixes to make sure we zero
   tunnel/xfrm state that hasn't been filled, to allow context access wrt
   pt_regs on 32-bit archs for tracing, and last but not least various test
   cases for fixes that landed in bpf earlier, from Daniel.

5) Get rid of the ndo_xdp_flush API and extend ndo_xdp_xmit with an
   XDP_XMIT_FLUSH flag instead, which avoids one indirect call as flushing is
   now merged directly into ndo_xdp_xmit(), from Jesper.

6) Add a new bpf_get_current_cgroup_id() helper that can be used in tracing
   to retrieve the cgroup id of the current process in order to allow for
   e.g. aggregation of container-level events, from Yonghong.

7) Two follow-up fixes for BTF to reject invalid input values and, related
   to that, two test cases for BPF kselftests, from Martin.

8) Various API improvements to the bpf_fib_lookup() helper: dropping the
   MPLS bits, which are not fully hashed out yet; rejecting invalid helper
   flags; returning an error for unsupported address families; and renaming
   flowlabel to flowinfo, from David.

9) Various fixes and improvements to sockmap BPF kselftests, in particular
   proper error detection and data verification, from Prashant.

10) Two arm32 BPF JIT improvements. One fixes the imm range check with
    regard to whether an immediate fits into 24 bits, the other is a naming
    cleanup to make functions related to rsh handling consistent with those
    handling lsh, from Wang.

11) Two compile warning fixes in BPF, one for BTF and one to silence a gcc
    false positive in stack_map_get_build_id_offset(), from Arnd.

12) Add the missing seg6.h header to the tools include infrastructure in
    order to fix compilation of BPF kselftests, from Mathieu.

13) Several formatting cleanups in the BPF UAPI helper descriptions that
    also fix an error during rst2man compilation, from Quentin.

14) Hide an unused variable in sk_msg_convert_ctx_access() when IPv6 is not
    built into the kernel, from Yue.

15) Remove a useless double assignment in dev_map_enqueue(), from Colin.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
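As a concrete illustration of item 3), a lirc mode2 program receives a
pointer to a single u32 raw IR sample per invocation and reports decoded
results through the new rc_repeat()/rc_keydown() helpers. A minimal sketch
in restricted C follows; the 1 ms pulse "protocol", the scancode value and
the helper wrapper declaration are illustrative assumptions, not code from
this merge:

// SPDX-License-Identifier: GPL-2.0
// Hypothetical decoder: report a keydown for any pulse longer than 1 ms.
// The program context is a pointer to one u32 sample in lirc mode2
// encoding (duration in microseconds plus pulse/space/timeout flags).
#include <linux/lirc.h>
#include <linux/bpf.h>
#include "bpf_helpers.h"	/* SEC(), as in tools/testing/selftests/bpf */

static int (*bpf_rc_keydown)(void *ctx, unsigned int protocol,
			     unsigned long long scancode,
			     unsigned int toggle) =
	(void *) BPF_FUNC_rc_keydown;

SEC("lirc_mode2")
int ir_decoder(unsigned int *sample)
{
	if (LIRC_IS_PULSE(*sample) && LIRC_VALUE(*sample) > 1000)
		bpf_rc_keydown(sample, 0 /* RC_PROTO_UNKNOWN */, 0x01, 0);

	return 0;
}

// rc_repeat()/rc_keydown() are gpl_only helpers (see bpf-lirc.c below),
// so the program must carry a GPL-compatible license.
char _license[] SEC("license") = "GPL";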
Diffstat (limited to 'drivers')
 drivers/media/rc/Kconfig                      |  13
 drivers/media/rc/Makefile                     |   1
 drivers/media/rc/bpf-lirc.c                   | 313
 drivers/media/rc/lirc_dev.c                   |  30
 drivers/media/rc/rc-core-priv.h               |  21
 drivers/media/rc/rc-ir-raw.c                  |  12
 drivers/net/ethernet/intel/i40e/i40e_main.c   |   1
 drivers/net/ethernet/intel/i40e/i40e_txrx.c   |  33
 drivers/net/ethernet/intel/i40e/i40e_txrx.h   |   4
 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c |  42
 drivers/net/tun.c                             |  44
 drivers/net/virtio_net.c                      |  22
 12 files changed, 443 insertions, 93 deletions
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index eb2c3b6eca7f..d5b35a6ba899 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -25,6 +25,19 @@ config LIRC
passes raw IR to and from userspace, which is needed for
IR transmitting (aka "blasting") and for the lirc daemon.
+config BPF_LIRC_MODE2
+ bool "Support for eBPF programs attached to lirc devices"
+ depends on BPF_SYSCALL
+ depends on RC_CORE=y
+ depends on LIRC
+ help
+ Allow attaching eBPF programs to a lirc device using the bpf(2)
+ syscall command BPF_PROG_ATTACH. This is supported for raw IR
+ receivers.
+
+ These eBPF programs can be used to decode IR into scancodes, for
+ IR protocols not supported by the kernel decoders.
+
menuconfig RC_DECODERS
bool "Remote controller decoders"
depends on RC_CORE
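
The help text above refers to the bpf(2) BPF_PROG_ATTACH command; the
userspace side of that attachment could look roughly like the following
sketch (raw bpf(2) usage, error handling trimmed; the device path and
function name are examples, and BPF_LIRC_MODE2 is the attach type added by
this series in the UAPI headers, which are outside this drivers-only diff):

// Attach an already-loaded BPF_PROG_TYPE_LIRC_MODE2 program (prog_fd)
// to a raw IR receiver via its lirc chardev.
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int lirc_attach(int prog_fd, const char *path /* e.g. "/dev/lirc0" */)
{
	union bpf_attr attr;
	int lirc_fd = open(path, O_RDWR);

	if (lirc_fd < 0)
		return -1;

	memset(&attr, 0, sizeof(attr));
	attr.target_fd     = lirc_fd;	/* resolved via rc_dev_get_from_fd() */
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type   = BPF_LIRC_MODE2;

	/* dispatched to lirc_prog_attach() in bpf-lirc.c below */
	return syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
}

The attachment lives on the rc device rather than on the file descriptor
(note the put_device() at the end of the attach path), so the lirc fd may
be closed once the call returns.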
diff --git a/drivers/media/rc/Makefile b/drivers/media/rc/Makefile
index 2e1c87066f6c..e0340d043fe8 100644
--- a/drivers/media/rc/Makefile
+++ b/drivers/media/rc/Makefile
@@ -5,6 +5,7 @@ obj-y += keymaps/
obj-$(CONFIG_RC_CORE) += rc-core.o
rc-core-y := rc-main.o rc-ir-raw.o
rc-core-$(CONFIG_LIRC) += lirc_dev.o
+rc-core-$(CONFIG_BPF_LIRC_MODE2) += bpf-lirc.o
obj-$(CONFIG_IR_NEC_DECODER) += ir-nec-decoder.o
obj-$(CONFIG_IR_RC5_DECODER) += ir-rc5-decoder.o
obj-$(CONFIG_IR_RC6_DECODER) += ir-rc6-decoder.o
diff --git a/drivers/media/rc/bpf-lirc.c b/drivers/media/rc/bpf-lirc.c
new file mode 100644
index 000000000000..40826bba06b6
--- /dev/null
+++ b/drivers/media/rc/bpf-lirc.c
@@ -0,0 +1,313 @@
+// SPDX-License-Identifier: GPL-2.0
+// bpf-lirc.c - handles bpf programs attached to lirc devices
+//
+// Copyright (C) 2018 Sean Young <sean@mess.org>
+
+#include <linux/bpf.h>
+#include <linux/filter.h>
+#include <linux/bpf_lirc.h>
+#include "rc-core-priv.h"
+
+/*
+ * BPF interface for raw IR
+ */
+const struct bpf_prog_ops lirc_mode2_prog_ops = {
+};
+
+BPF_CALL_1(bpf_rc_repeat, u32*, sample)
+{
+ struct ir_raw_event_ctrl *ctrl;
+
+ ctrl = container_of(sample, struct ir_raw_event_ctrl, bpf_sample);
+
+ rc_repeat(ctrl->dev);
+
+ return 0;
+}
+
+static const struct bpf_func_proto rc_repeat_proto = {
+ .func = bpf_rc_repeat,
+ .gpl_only = true, /* rc_repeat is EXPORT_SYMBOL_GPL */
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+};
+
+/*
+ * Currently rc-core does not support 64-bit scancodes, but there are many
+ * known protocols with more than 32 bits. So, define the interface as u64
+ * to be future-proof.
+ */
+BPF_CALL_4(bpf_rc_keydown, u32*, sample, u32, protocol, u64, scancode,
+ u32, toggle)
+{
+ struct ir_raw_event_ctrl *ctrl;
+
+ ctrl = container_of(sample, struct ir_raw_event_ctrl, bpf_sample);
+
+ rc_keydown(ctrl->dev, protocol, scancode, toggle != 0);
+
+ return 0;
+}
+
+static const struct bpf_func_proto rc_keydown_proto = {
+ .func = bpf_rc_keydown,
+ .gpl_only = true, /* rc_keydown is EXPORT_SYMBOL_GPL */
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_ANYTHING,
+ .arg3_type = ARG_ANYTHING,
+ .arg4_type = ARG_ANYTHING,
+};
+
+static const struct bpf_func_proto *
+lirc_mode2_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
+{
+ switch (func_id) {
+ case BPF_FUNC_rc_repeat:
+ return &rc_repeat_proto;
+ case BPF_FUNC_rc_keydown:
+ return &rc_keydown_proto;
+ case BPF_FUNC_map_lookup_elem:
+ return &bpf_map_lookup_elem_proto;
+ case BPF_FUNC_map_update_elem:
+ return &bpf_map_update_elem_proto;
+ case BPF_FUNC_map_delete_elem:
+ return &bpf_map_delete_elem_proto;
+ case BPF_FUNC_ktime_get_ns:
+ return &bpf_ktime_get_ns_proto;
+ case BPF_FUNC_tail_call:
+ return &bpf_tail_call_proto;
+ case BPF_FUNC_get_prandom_u32:
+ return &bpf_get_prandom_u32_proto;
+ case BPF_FUNC_trace_printk:
+ if (capable(CAP_SYS_ADMIN))
+ return bpf_get_trace_printk_proto();
+ /* fall through */
+ default:
+ return NULL;
+ }
+}
+
+static bool lirc_mode2_is_valid_access(int off, int size,
+ enum bpf_access_type type,
+ const struct bpf_prog *prog,
+ struct bpf_insn_access_aux *info)
+{
+ /* We have one field of u32 */
+ return type == BPF_READ && off == 0 && size == sizeof(u32);
+}
+
+const struct bpf_verifier_ops lirc_mode2_verifier_ops = {
+ .get_func_proto = lirc_mode2_func_proto,
+ .is_valid_access = lirc_mode2_is_valid_access
+};
+
+#define BPF_MAX_PROGS 64
+
+static int lirc_bpf_attach(struct rc_dev *rcdev, struct bpf_prog *prog)
+{
+ struct bpf_prog_array __rcu *old_array;
+ struct bpf_prog_array *new_array;
+ struct ir_raw_event_ctrl *raw;
+ int ret;
+
+ if (rcdev->driver_type != RC_DRIVER_IR_RAW)
+ return -EINVAL;
+
+ ret = mutex_lock_interruptible(&ir_raw_handler_lock);
+ if (ret)
+ return ret;
+
+ raw = rcdev->raw;
+ if (!raw) {
+ ret = -ENODEV;
+ goto unlock;
+ }
+
+ if (raw->progs && bpf_prog_array_length(raw->progs) >= BPF_MAX_PROGS) {
+ ret = -E2BIG;
+ goto unlock;
+ }
+
+ old_array = raw->progs;
+ ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
+ if (ret < 0)
+ goto unlock;
+
+ rcu_assign_pointer(raw->progs, new_array);
+ bpf_prog_array_free(old_array);
+
+unlock:
+ mutex_unlock(&ir_raw_handler_lock);
+ return ret;
+}
+
+static int lirc_bpf_detach(struct rc_dev *rcdev, struct bpf_prog *prog)
+{
+ struct bpf_prog_array __rcu *old_array;
+ struct bpf_prog_array *new_array;
+ struct ir_raw_event_ctrl *raw;
+ int ret;
+
+ if (rcdev->driver_type != RC_DRIVER_IR_RAW)
+ return -EINVAL;
+
+ ret = mutex_lock_interruptible(&ir_raw_handler_lock);
+ if (ret)
+ return ret;
+
+ raw = rcdev->raw;
+ if (!raw) {
+ ret = -ENODEV;
+ goto unlock;
+ }
+
+ old_array = raw->progs;
+ ret = bpf_prog_array_copy(old_array, prog, NULL, &new_array);
+ /*
+ * Do not use bpf_prog_array_delete_safe() as we would end up
+ * with a dummy entry in the array, and then we would free the
+ * dummy in lirc_bpf_free()
+ */
+ if (ret)
+ goto unlock;
+
+ rcu_assign_pointer(raw->progs, new_array);
+ bpf_prog_array_free(old_array);
+unlock:
+ mutex_unlock(&ir_raw_handler_lock);
+ return ret;
+}
+
+void lirc_bpf_run(struct rc_dev *rcdev, u32 sample)
+{
+ struct ir_raw_event_ctrl *raw = rcdev->raw;
+
+ raw->bpf_sample = sample;
+
+ if (raw->progs)
+ BPF_PROG_RUN_ARRAY(raw->progs, &raw->bpf_sample, BPF_PROG_RUN);
+}
+
+/*
+ * This should be called once the rc thread has been stopped, so there can be
+ * no concurrent bpf execution.
+ */
+void lirc_bpf_free(struct rc_dev *rcdev)
+{
+ struct bpf_prog **progs;
+
+ if (!rcdev->raw->progs)
+ return;
+
+ progs = rcu_dereference(rcdev->raw->progs)->progs;
+ while (*progs)
+ bpf_prog_put(*progs++);
+
+ bpf_prog_array_free(rcdev->raw->progs);
+}
+
+int lirc_prog_attach(const union bpf_attr *attr)
+{
+ struct bpf_prog *prog;
+ struct rc_dev *rcdev;
+ int ret;
+
+ if (attr->attach_flags)
+ return -EINVAL;
+
+ prog = bpf_prog_get_type(attr->attach_bpf_fd,
+ BPF_PROG_TYPE_LIRC_MODE2);
+ if (IS_ERR(prog))
+ return PTR_ERR(prog);
+
+ rcdev = rc_dev_get_from_fd(attr->target_fd);
+ if (IS_ERR(rcdev)) {
+ bpf_prog_put(prog);
+ return PTR_ERR(rcdev);
+ }
+
+ ret = lirc_bpf_attach(rcdev, prog);
+ if (ret)
+ bpf_prog_put(prog);
+
+ put_device(&rcdev->dev);
+
+ return ret;
+}
+
+int lirc_prog_detach(const union bpf_attr *attr)
+{
+ struct bpf_prog *prog;
+ struct rc_dev *rcdev;
+ int ret;
+
+ if (attr->attach_flags)
+ return -EINVAL;
+
+ prog = bpf_prog_get_type(attr->attach_bpf_fd,
+ BPF_PROG_TYPE_LIRC_MODE2);
+ if (IS_ERR(prog))
+ return PTR_ERR(prog);
+
+ rcdev = rc_dev_get_from_fd(attr->target_fd);
+ if (IS_ERR(rcdev)) {
+ bpf_prog_put(prog);
+ return PTR_ERR(rcdev);
+ }
+
+ ret = lirc_bpf_detach(rcdev, prog);
+
+ bpf_prog_put(prog);
+ put_device(&rcdev->dev);
+
+ return ret;
+}
+
+int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr)
+{
+ __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
+ struct bpf_prog_array __rcu *progs;
+ struct rc_dev *rcdev;
+ u32 cnt, flags = 0;
+ int ret;
+
+ if (attr->query.query_flags)
+ return -EINVAL;
+
+ rcdev = rc_dev_get_from_fd(attr->query.target_fd);
+ if (IS_ERR(rcdev))
+ return PTR_ERR(rcdev);
+
+ if (rcdev->driver_type != RC_DRIVER_IR_RAW) {
+ ret = -EINVAL;
+ goto put;
+ }
+
+ ret = mutex_lock_interruptible(&ir_raw_handler_lock);
+ if (ret)
+ goto put;
+
+ progs = rcdev->raw->progs;
+ cnt = progs ? bpf_prog_array_length(progs) : 0;
+
+ if (copy_to_user(&uattr->query.prog_cnt, &cnt, sizeof(cnt))) {
+ ret = -EFAULT;
+ goto unlock;
+ }
+
+ if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags))) {
+ ret = -EFAULT;
+ goto unlock;
+ }
+
+ if (attr->query.prog_cnt != 0 && prog_ids && cnt)
+ ret = bpf_prog_array_copy_to_user(progs, prog_ids, cnt);
+
+unlock:
+ mutex_unlock(&ir_raw_handler_lock);
+put:
+ put_device(&rcdev->dev);
+
+ return ret;
+}
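
lirc_prog_query() above is the kernel side of BPF_PROG_QUERY for lirc
devices. A matching userspace sketch, again using the raw syscall (the
function name is illustrative):

// Fetch the IDs of programs attached to a lirc chardev. The kernel
// writes prog_cnt back even when prog_ids is NULL, so the usual
// two-call pattern (query the count, then fetch the IDs) works.
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int lirc_query(int lirc_fd, __u32 *ids, __u32 *cnt)
{
	union bpf_attr attr;
	int err;

	memset(&attr, 0, sizeof(attr));
	attr.query.target_fd = lirc_fd;
	attr.query.prog_ids  = (__u64)(unsigned long)ids;	/* may be NULL */
	attr.query.prog_cnt  = *cnt;

	err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, sizeof(attr));
	if (!err)
		*cnt = attr.query.prog_cnt;
	return err;
}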
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index 24e9fbb80e81..da7013a12a58 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -20,6 +20,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/device.h>
+#include <linux/file.h>
#include <linux/idr.h>
#include <linux/poll.h>
#include <linux/sched.h>
@@ -104,6 +105,12 @@ void ir_lirc_raw_event(struct rc_dev *dev, struct ir_raw_event ev)
TO_US(ev.duration), TO_STR(ev.pulse));
}
+ /*
+ * bpf does not care about the gap generated above; that exists
+ * for backwards compatibility
+ */
+ lirc_bpf_run(dev, sample);
+
spin_lock_irqsave(&dev->lirc_fh_lock, flags);
list_for_each_entry(fh, &dev->lirc_fh, list) {
if (LIRC_IS_TIMEOUT(sample) && !fh->send_timeout_reports)
@@ -816,4 +823,27 @@ void __exit lirc_dev_exit(void)
unregister_chrdev_region(lirc_base_dev, RC_DEV_MAX);
}
+struct rc_dev *rc_dev_get_from_fd(int fd)
+{
+ struct fd f = fdget(fd);
+ struct lirc_fh *fh;
+ struct rc_dev *dev;
+
+ if (!f.file)
+ return ERR_PTR(-EBADF);
+
+ if (f.file->f_op != &lirc_fops) {
+ fdput(f);
+ return ERR_PTR(-EINVAL);
+ }
+
+ fh = f.file->private_data;
+ dev = fh->rc;
+
+ get_device(&dev->dev);
+ fdput(f);
+
+ return dev;
+}
+
MODULE_ALIAS("lirc_dev");
diff --git a/drivers/media/rc/rc-core-priv.h b/drivers/media/rc/rc-core-priv.h
index e0e6a17460f6..eb004757038b 100644
--- a/drivers/media/rc/rc-core-priv.h
+++ b/drivers/media/rc/rc-core-priv.h
@@ -13,6 +13,7 @@
#define MAX_IR_EVENT_SIZE 512
#include <linux/slab.h>
+#include <uapi/linux/bpf.h>
#include <media/rc-core.h>
/**
@@ -57,6 +58,11 @@ struct ir_raw_event_ctrl {
/* raw decoder state follows */
struct ir_raw_event prev_ev;
struct ir_raw_event this_ev;
+
+#ifdef CONFIG_BPF_LIRC_MODE2
+ u32 bpf_sample;
+ struct bpf_prog_array __rcu *progs;
+#endif
struct nec_dec {
int state;
unsigned count;
@@ -126,6 +132,9 @@ struct ir_raw_event_ctrl {
} imon;
};
+/* Mutex for locking raw IR processing and handler change */
+extern struct mutex ir_raw_handler_lock;
+
/* macros for IR decoders */
static inline bool geq_margin(unsigned d1, unsigned d2, unsigned margin)
{
@@ -288,6 +297,7 @@ void ir_lirc_raw_event(struct rc_dev *dev, struct ir_raw_event ev);
void ir_lirc_scancode_event(struct rc_dev *dev, struct lirc_scancode *lsc);
int ir_lirc_register(struct rc_dev *dev);
void ir_lirc_unregister(struct rc_dev *dev);
+struct rc_dev *rc_dev_get_from_fd(int fd);
#else
static inline int lirc_dev_init(void) { return 0; }
static inline void lirc_dev_exit(void) {}
@@ -299,4 +309,15 @@ static inline int ir_lirc_register(struct rc_dev *dev) { return 0; }
static inline void ir_lirc_unregister(struct rc_dev *dev) { }
#endif
+/*
+ * bpf interface
+ */
+#ifdef CONFIG_BPF_LIRC_MODE2
+void lirc_bpf_free(struct rc_dev *dev);
+void lirc_bpf_run(struct rc_dev *dev, u32 sample);
+#else
+static inline void lirc_bpf_free(struct rc_dev *dev) { }
+static inline void lirc_bpf_run(struct rc_dev *dev, u32 sample) { }
+#endif
+
#endif /* _RC_CORE_PRIV */
diff --git a/drivers/media/rc/rc-ir-raw.c b/drivers/media/rc/rc-ir-raw.c
index 374f83105a23..7675b7ee5bc7 100644
--- a/drivers/media/rc/rc-ir-raw.c
+++ b/drivers/media/rc/rc-ir-raw.c
@@ -14,7 +14,7 @@
static LIST_HEAD(ir_raw_client_list);
/* Used to handle IR raw handler extensions */
-static DEFINE_MUTEX(ir_raw_handler_lock);
+DEFINE_MUTEX(ir_raw_handler_lock);
static LIST_HEAD(ir_raw_handler_list);
static atomic64_t available_protocols = ATOMIC64_INIT(0);
@@ -621,9 +621,17 @@ void ir_raw_event_unregister(struct rc_dev *dev)
list_for_each_entry(handler, &ir_raw_handler_list, list)
if (handler->raw_unregister)
handler->raw_unregister(dev);
- mutex_unlock(&ir_raw_handler_lock);
+
+ lirc_bpf_free(dev);
ir_raw_event_free(dev);
+
+ /*
+ * A user can be calling bpf(BPF_PROG_{QUERY|ATTACH|DETACH}), so
+ * ensure that the raw member is null on unlock; this is how
+ * "device gone" is checked.
+ */
+ mutex_unlock(&ir_raw_handler_lock);
}
/*
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index b5daa5c9c7de..c944bd10b03d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -11883,7 +11883,6 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_bridge_setlink = i40e_ndo_bridge_setlink,
.ndo_bpf = i40e_xdp,
.ndo_xdp_xmit = i40e_xdp_xmit,
- .ndo_xdp_flush = i40e_xdp_flush,
};
/**
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 105a26f447c0..8ffb7454e67c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -3693,11 +3693,13 @@ netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
* For error cases, a negative errno code is returned and no frames
* are transmitted (caller must handle freeing frames).
**/
-int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames)
+int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags)
{
struct i40e_netdev_priv *np = netdev_priv(dev);
unsigned int queue_index = smp_processor_id();
struct i40e_vsi *vsi = np->vsi;
+ struct i40e_ring *xdp_ring;
int drops = 0;
int i;
@@ -3707,35 +3709,24 @@ int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames)
if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs)
return -ENXIO;
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ xdp_ring = vsi->xdp_rings[queue_index];
+
for (i = 0; i < n; i++) {
struct xdp_frame *xdpf = frames[i];
int err;
- err = i40e_xmit_xdp_ring(xdpf, vsi->xdp_rings[queue_index]);
+ err = i40e_xmit_xdp_ring(xdpf, xdp_ring);
if (err != I40E_XDP_TX) {
xdp_return_frame_rx_napi(xdpf);
drops++;
}
}
- return n - drops;
-}
-
-/**
- * i40e_xdp_flush - Implements ndo_xdp_flush
- * @dev: netdev
- **/
-void i40e_xdp_flush(struct net_device *dev)
-{
- struct i40e_netdev_priv *np = netdev_priv(dev);
- unsigned int queue_index = smp_processor_id();
- struct i40e_vsi *vsi = np->vsi;
-
- if (test_bit(__I40E_VSI_DOWN, vsi->state))
- return;
-
- if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs)
- return;
+ if (unlikely(flags & XDP_XMIT_FLUSH))
+ i40e_xdp_ring_update_tail(xdp_ring);
- i40e_xdp_ring_update_tail(vsi->xdp_rings[queue_index]);
+ return n - drops;
}
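
The new contract that i40e follows here (reject unknown flags up front,
queue up to n frames, and write the tail register only when XDP_XMIT_FLUSH
is set) is the same for every driver in this series. A stripped-down sketch
of the pattern, with hypothetical my_* stand-ins for driver internals:

// Generic shape of ndo_xdp_xmit() after the ndo_xdp_flush removal.
static int my_xdp_xmit(struct net_device *dev, int n,
		       struct xdp_frame **frames, u32 flags)
{
	struct my_ring *ring;
	int drops = 0, i;

	/* only XDP_XMIT_FLUSH is defined in XDP_XMIT_FLAGS_MASK so far */
	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	ring = my_get_xdp_ring(dev, smp_processor_id());

	for (i = 0; i < n; i++) {
		if (my_queue_frame(ring, frames[i])) {
			/* dropped frames are freed by the driver itself */
			xdp_return_frame_rx_napi(frames[i]);
			drops++;
		}
	}

	/* flushing is folded into xmit: one indirect call instead of two */
	if (unlikely(flags & XDP_XMIT_FLUSH))
		my_ring_update_tail(ring);

	return n - drops;
}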
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index eb8804b3d7b6..bb04f6a731fe 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -487,8 +487,8 @@ u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw);
void i40e_detect_recover_hung(struct i40e_vsi *vsi);
int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40e_chk_linearize(struct sk_buff *skb);
-int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames);
-void i40e_xdp_flush(struct net_device *dev);
+int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags);
/**
* i40e_get_head - Retrieve head from head writeback
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index dd8a3a037c2f..38b4e4899490 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -10023,8 +10023,17 @@ static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp)
}
}
+static void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring)
+{
+ /* Force memory writes to complete before letting h/w know there
+ * are new descriptors to fetch.
+ */
+ wmb();
+ writel(ring->next_to_use, ring->tail);
+}
+
static int ixgbe_xdp_xmit(struct net_device *dev, int n,
- struct xdp_frame **frames)
+ struct xdp_frame **frames, u32 flags)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_ring *ring;
@@ -10034,6 +10043,9 @@ static int ixgbe_xdp_xmit(struct net_device *dev, int n,
if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state)))
return -ENETDOWN;
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
/* During program transitions it's possible adapter->xdp_prog is assigned
* but ring has not been configured yet. In this case simply abort xmit.
*/
@@ -10052,31 +10064,10 @@ static int ixgbe_xdp_xmit(struct net_device *dev, int n,
}
}
- return n - drops;
-}
-
-static void ixgbe_xdp_flush(struct net_device *dev)
-{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
- struct ixgbe_ring *ring;
-
- /* Its possible the device went down between xdp xmit and flush so
- * we need to ensure device is still up.
- */
- if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state)))
- return;
-
- ring = adapter->xdp_prog ? adapter->xdp_ring[smp_processor_id()] : NULL;
- if (unlikely(!ring))
- return;
-
- /* Force memory writes to complete before letting h/w know there
- * are new descriptors to fetch.
- */
- wmb();
- writel(ring->next_to_use, ring->tail);
+ if (unlikely(flags & XDP_XMIT_FLUSH))
+ ixgbe_xdp_ring_update_tail(ring);
- return;
+ return n - drops;
}
static const struct net_device_ops ixgbe_netdev_ops = {
@@ -10126,7 +10117,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_features_check = ixgbe_features_check,
.ndo_bpf = ixgbe_xdp,
.ndo_xdp_xmit = ixgbe_xdp_xmit,
- .ndo_xdp_flush = ixgbe_xdp_flush,
};
/**
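
On the caller side, the flag enables batching: a caller can queue a whole
bulk of frames with a single ndo_xdp_xmit() invocation and request the
doorbell write once. A hedged sketch of that shape (names are illustrative;
the error convention follows the i40e comment earlier, where a negative
errno means no frames were consumed):

// Drain a batch of frames through the new API with a single flush.
static void my_flush_batch(struct net_device *dev,
			   struct xdp_frame **frames, int cnt)
{
	int i, sent;

	sent = dev->netdev_ops->ndo_xdp_xmit(dev, cnt, frames,
					     XDP_XMIT_FLUSH);
	if (sent < 0) {
		/* nothing was queued or freed by the driver */
		for (i = 0; i < cnt; i++)
			xdp_return_frame_rx_napi(frames[i]);
	}
	/* on success, the driver already freed any frames it dropped */
}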
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 067fc9e7e3ed..85e14adf5207 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1289,7 +1289,16 @@ static const struct net_device_ops tun_netdev_ops = {
.ndo_get_stats64 = tun_net_get_stats64,
};
-static int tun_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames)
+static void __tun_xdp_flush_tfile(struct tun_file *tfile)
+{
+ /* Notify and wake up reader process */
+ if (tfile->flags & TUN_FASYNC)
+ kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
+ tfile->socket.sk->sk_data_ready(tfile->socket.sk);
+}
+
+static int tun_xdp_xmit(struct net_device *dev, int n,
+ struct xdp_frame **frames, u32 flags)
{
struct tun_struct *tun = netdev_priv(dev);
struct tun_file *tfile;
@@ -1298,6 +1307,9 @@ static int tun_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames
int cnt = n;
int i;
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
rcu_read_lock();
numqueues = READ_ONCE(tun->numqueues);
@@ -1325,6 +1337,9 @@ static int tun_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames
}
spin_unlock(&tfile->tx_ring.producer_lock);
+ if (flags & XDP_XMIT_FLUSH)
+ __tun_xdp_flush_tfile(tfile);
+
rcu_read_unlock();
return cnt - drops;
}
@@ -1336,30 +1351,7 @@ static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
if (unlikely(!frame))
return -EOVERFLOW;
- return tun_xdp_xmit(dev, 1, &frame);
-}
-
-static void tun_xdp_flush(struct net_device *dev)
-{
- struct tun_struct *tun = netdev_priv(dev);
- struct tun_file *tfile;
- u32 numqueues;
-
- rcu_read_lock();
-
- numqueues = READ_ONCE(tun->numqueues);
- if (!numqueues)
- goto out;
-
- tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
- numqueues]);
- /* Notify and wake up reader process */
- if (tfile->flags & TUN_FASYNC)
- kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
- tfile->socket.sk->sk_data_ready(tfile->socket.sk);
-
-out:
- rcu_read_unlock();
+ return tun_xdp_xmit(dev, 1, &frame, XDP_XMIT_FLUSH);
}
static const struct net_device_ops tap_netdev_ops = {
@@ -1380,7 +1372,6 @@ static const struct net_device_ops tap_netdev_ops = {
.ndo_get_stats64 = tun_net_get_stats64,
.ndo_bpf = tun_xdp,
.ndo_xdp_xmit = tun_xdp_xmit,
- .ndo_xdp_flush = tun_xdp_flush,
};
static void tun_flow_init(struct tun_struct *tun)
@@ -1699,7 +1690,6 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
alloc_frag->offset += buflen;
if (tun_xdp_tx(tun->dev, &xdp))
goto err_redirect;
- tun_xdp_flush(tun->dev);
rcu_read_unlock();
local_bh_enable();
return NULL;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 6d710b8b41c5..2aaa18ec7d46 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -413,18 +413,6 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
return skb;
}
-static void virtnet_xdp_flush(struct net_device *dev)
-{
- struct virtnet_info *vi = netdev_priv(dev);
- struct send_queue *sq;
- unsigned int qp;
-
- qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
- sq = &vi->sq[qp];
-
- virtqueue_kick(sq->vq);
-}
-
static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
struct send_queue *sq,
struct xdp_frame *xdpf)
@@ -474,7 +462,7 @@ static int __virtnet_xdp_tx_xmit(struct virtnet_info *vi,
}
static int virtnet_xdp_xmit(struct net_device *dev,
- int n, struct xdp_frame **frames)
+ int n, struct xdp_frame **frames, u32 flags)
{
struct virtnet_info *vi = netdev_priv(dev);
struct receive_queue *rq = vi->rq;
@@ -487,6 +475,9 @@ static int virtnet_xdp_xmit(struct net_device *dev,
int err;
int i;
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
sq = &vi->sq[qp];
@@ -510,6 +501,10 @@ static int virtnet_xdp_xmit(struct net_device *dev,
drops++;
}
}
+
+ if (flags & XDP_XMIT_FLUSH)
+ virtqueue_kick(sq->vq);
+
return n - drops;
}
@@ -2377,7 +2372,6 @@ static const struct net_device_ops virtnet_netdev = {
#endif
.ndo_bpf = virtnet_xdp,
.ndo_xdp_xmit = virtnet_xdp_xmit,
- .ndo_xdp_flush = virtnet_xdp_flush,
.ndo_features_check = passthru_features_check,
.ndo_get_phys_port_name = virtnet_get_phys_port_name,
};