git.dujemihanovic.xyz Git - linux.git/commitdiff
nfp: bpf: relax prog rejection for mtu check through max_pkt_offset
author: Yu Xiao <yu.xiao@corigine.com>
Thu, 28 Oct 2021 10:00:36 +0000 (12:00 +0200)
committer: David S. Miller <davem@davemloft.net>
Thu, 28 Oct 2021 11:59:32 +0000 (12:59 +0100)
MTU change is refused whenever the value of new MTU is bigger than
the max packet bytes that fits in NFP Cluster Target Memory (CTM).
However, an eBPF program doesn't always need to access the whole
packet data.

The maximum direct packet access (DPA) offset has always been
calculated by the verifier and stored in the max_pkt_offset field of
the prog aux data.

Signed-off-by: Yu Xiao <yu.xiao@corigine.com>
Reviewed-by: Yinjun Zhang <yinjun.zhang@corigine.com>
Reviewed-by: Niklas Soderlund <niklas.soderlund@corigine.com>
Signed-off-by: Simon Horman <simon.horman@corigine.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/netronome/nfp/bpf/main.c
drivers/net/ethernet/netronome/nfp/bpf/main.h
drivers/net/ethernet/netronome/nfp/bpf/offload.c

index 11c83a99b0140dfb9c11e8246336d6aad0fe8278..f469950c726573fd1f3a40a162b9f0f03a3bc6a5 100644 (file)
@@ -182,15 +182,21 @@ static int
 nfp_bpf_check_mtu(struct nfp_app *app, struct net_device *netdev, int new_mtu)
 {
        struct nfp_net *nn = netdev_priv(netdev);
-       unsigned int max_mtu;
+       struct nfp_bpf_vnic *bv;
+       struct bpf_prog *prog;
 
        if (~nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
                return 0;
 
-       max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
-       if (new_mtu > max_mtu) {
-               nn_info(nn, "BPF offload active, MTU over %u not supported\n",
-                       max_mtu);
+       if (nn->xdp_hw.prog) {
+               prog = nn->xdp_hw.prog;
+       } else {
+               bv = nn->app_priv;
+               prog = bv->tc_prog;
+       }
+
+       if (nfp_bpf_offload_check_mtu(nn, prog, new_mtu)) {
+               nn_info(nn, "BPF offload active, potential packet access beyond hardware packet boundary");
                return -EBUSY;
        }
        return 0;
index d0e17eebddd949bd8e07609714ad69ac876ac0ad..16841bb750b72449d47e32f53a6d1f87f23a7cc1 100644 (file)
@@ -560,6 +560,8 @@ bool nfp_is_subprog_start(struct nfp_insn_meta *meta);
 void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog);
 int nfp_bpf_jit(struct nfp_prog *prog);
 bool nfp_bpf_supported_opcode(u8 code);
+bool nfp_bpf_offload_check_mtu(struct nfp_net *nn, struct bpf_prog *prog,
+                              unsigned int mtu);
 
 int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx,
                    int prev_insn_idx);
index 53851853562c63221afca3e0f2aa8be2aa40ec48..9d97cd281f18e4d055d8b95233b8e4566f051a7b 100644 (file)
@@ -481,19 +481,28 @@ int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
        return 0;
 }
 
+bool nfp_bpf_offload_check_mtu(struct nfp_net *nn, struct bpf_prog *prog,
+                              unsigned int mtu)
+{
+       unsigned int fw_mtu, pkt_off;
+
+       fw_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
+       pkt_off = min(prog->aux->max_pkt_offset, mtu);
+
+       return fw_mtu < pkt_off;
+}
+
 static int
 nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog,
                 struct netlink_ext_ack *extack)
 {
        struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
-       unsigned int fw_mtu, pkt_off, max_stack, max_prog_len;
+       unsigned int max_stack, max_prog_len;
        dma_addr_t dma_addr;
        void *img;
        int err;
 
-       fw_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
-       pkt_off = min(prog->aux->max_pkt_offset, nn->dp.netdev->mtu);
-       if (fw_mtu < pkt_off) {
+       if (nfp_bpf_offload_check_mtu(nn, prog, nn->dp.netdev->mtu)) {
                NL_SET_ERR_MSG_MOD(extack, "BPF offload not supported with potential packet access beyond HW packet split boundary");
                return -EOPNOTSUPP;
        }