bpf, netkit: Add indirect call wrapper for fetching peer dev
author     Daniel Borkmann <daniel@iogearbox.net>
           Tue, 14 Nov 2023 00:42:18 +0000 (01:42 +0100)
committer  Martin KaFai Lau <martin.lau@kernel.org>
           Mon, 20 Nov 2023 18:15:16 +0000 (10:15 -0800)
ndo_get_peer_dev is used in the tcx BPF fast path, therefore make use of
an indirect call wrapper to optimize the bpf_redirect_peer() internal
handling a bit. Add a small skb_get_peer_dev() wrapper which utilizes the
INDIRECT_CALL_1() macro instead of open coding the call.
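
For reference, INDIRECT_CALL_1() lets retpoline builds avoid the retpoline
thunk in the common case: it compares the function pointer against the one
known candidate and takes a direct call on a match. Simplified sketch of
the semantics (not the exact definition from
include/linux/indirect_call_wrapper.h):

  #ifdef CONFIG_RETPOLINE
  /* Compare against the expected callee and take a cheap direct call
   * when it matches, otherwise fall back to the indirect call.
   */
  #define INDIRECT_CALL_1(f, f1, ...)                                 \
          ({                                                          \
                  likely(f == f1) ? f1(__VA_ARGS__) : f(__VA_ARGS__); \
          })
  #else
  /* Without retpolines this degrades to a plain indirect call. */
  #define INDIRECT_CALL_1(f, f1, ...) f(__VA_ARGS__)
  #endif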

Future work could add a peer pointer directly into struct net_device and
convert veth and netkit over to use it, so that eventually ndo_get_peer_dev
can be removed.

Co-developed-by: Nikolay Aleksandrov <razor@blackwall.org>
Signed-off-by: Nikolay Aleksandrov <razor@blackwall.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Stanislav Fomichev <sdf@google.com>
Link: https://lore.kernel.org/r/20231114004220.6495-7-daniel@iogearbox.net
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
drivers/net/netkit.c
include/net/netkit.h
net/core/filter.c

diff --git a/drivers/net/netkit.c b/drivers/net/netkit.c
index 99de11f9cde5c8102a933064a77d557cadd904c6..97bd6705c24117cff255ebbe34fc9efc4c0da2bd 100644
--- a/drivers/net/netkit.c
+++ b/drivers/net/netkit.c
@@ -7,6 +7,7 @@
 #include <linux/filter.h>
 #include <linux/netfilter_netdev.h>
 #include <linux/bpf_mprog.h>
+#include <linux/indirect_call_wrapper.h>
 
 #include <net/netkit.h>
 #include <net/dst.h>
@@ -177,7 +178,7 @@ out:
        rcu_read_unlock();
 }
 
-static struct net_device *netkit_peer_dev(struct net_device *dev)
+INDIRECT_CALLABLE_SCOPE struct net_device *netkit_peer_dev(struct net_device *dev)
 {
        return rcu_dereference(netkit_priv(dev)->peer);
 }
diff --git a/include/net/netkit.h b/include/net/netkit.h
index 0ba2e6b847ca53de6d2bcb480c0d615cc8f2dcc0..9ec0163739f45156657cb0d1b0f895514dd022a9 100644
--- a/include/net/netkit.h
+++ b/include/net/netkit.h
@@ -10,6 +10,7 @@ int netkit_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog);
 int netkit_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
 int netkit_prog_detach(const union bpf_attr *attr, struct bpf_prog *prog);
 int netkit_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr);
+INDIRECT_CALLABLE_DECLARE(struct net_device *netkit_peer_dev(struct net_device *dev));
 #else
 static inline int netkit_prog_attach(const union bpf_attr *attr,
                                     struct bpf_prog *prog)
@@ -34,5 +35,10 @@ static inline int netkit_prog_query(const union bpf_attr *attr,
 {
        return -EINVAL;
 }
+
+static inline struct net_device *netkit_peer_dev(struct net_device *dev)
+{
+       return NULL;
+}
 #endif /* CONFIG_NETKIT */
 #endif /* __NET_NETKIT_H */
diff --git a/net/core/filter.c b/net/core/filter.c
index cca810987c8dc93d1addac4df86658e7da271b05..7e4d7c3bcc849a9211eca4246cda7fa76af13c36 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -81,6 +81,7 @@
 #include <net/xdp.h>
 #include <net/mptcp.h>
 #include <net/netfilter/nf_conntrack_bpf.h>
+#include <net/netkit.h>
 #include <linux/un.h>
 
 #include "dev.h"
@@ -2468,6 +2469,16 @@ static const struct bpf_func_proto bpf_clone_redirect_proto = {
 DEFINE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info);
 EXPORT_PER_CPU_SYMBOL_GPL(bpf_redirect_info);
 
+static struct net_device *skb_get_peer_dev(struct net_device *dev)
+{
+       const struct net_device_ops *ops = dev->netdev_ops;
+
+       if (likely(ops->ndo_get_peer_dev))
+               return INDIRECT_CALL_1(ops->ndo_get_peer_dev,
+                                      netkit_peer_dev, dev);
+       return NULL;
+}
+
 int skb_do_redirect(struct sk_buff *skb)
 {
        struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
@@ -2481,12 +2492,9 @@ int skb_do_redirect(struct sk_buff *skb)
        if (unlikely(!dev))
                goto out_drop;
        if (flags & BPF_F_PEER) {
-               const struct net_device_ops *ops = dev->netdev_ops;
-
-               if (unlikely(!ops->ndo_get_peer_dev ||
-                            !skb_at_tc_ingress(skb)))
+               if (unlikely(!skb_at_tc_ingress(skb)))
                        goto out_drop;
-               dev = ops->ndo_get_peer_dev(dev);
+               dev = skb_get_peer_dev(dev);
                if (unlikely(!dev ||
                             !(dev->flags & IFF_UP) ||
                             net_eq(net, dev_net(dev))))
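
For context, the peer redirect path above is what a tc(x) BPF program hits
when it calls bpf_redirect_peer(). A minimal sketch of such a program
(TARGET_IFINDEX is a placeholder for the netkit/veth device's ifindex,
which a real loader would resolve at attach time):

  // SPDX-License-Identifier: GPL-2.0
  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>

  /* Placeholder ifindex of the netkit (or veth) device whose peer the
   * packet should land on; filled in by the loader in practice.
   */
  #define TARGET_IFINDEX 4

  SEC("tcx/ingress")
  int redirect_to_peer(struct __sk_buff *skb)
  {
          /* bpf_redirect_peer() sets BPF_F_PEER internally; the kernel
           * then resolves the peer via ndo_get_peer_dev(), i.e.
           * netkit_peer_dev() through skb_get_peer_dev() above.
           */
          return bpf_redirect_peer(TARGET_IFINDEX, 0);
  }

  char LICENSE[] SEC("license") = "GPL";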