ice: improve updating ice_{t,r}x_ring::xsk_pool
Author:     Maciej Fijalkowski <maciej.fijalkowski@intel.com>
AuthorDate: Fri, 26 Jul 2024 18:17:14 +0000 (20:17 +0200)
Commit:     Tony Nguyen <anthony.l.nguyen@intel.com>
CommitDate: Mon, 29 Jul 2024 15:52:29 +0000 (08:52 -0700)
The xsk_buff_pool pointers held by the ice ring structs are updated via
ndo_bpf, which runs in process context, while a remote CPU may be reading
them at the same time from within NAPI poll. Use synchronize_net() after
the pointer update and {READ,WRITE}_ONCE() when accessing the pointer.

Fixes: 2d4238f55697 ("ice: Add support for AF_XDP")
Reviewed-by: Shannon Nelson <shannon.nelson@amd.com>
Tested-by: Chandan Kumar Rout <chandanx.rout@intel.com> (A Contingent Worker at Intel)
Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
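
The synchronization pattern this change applies, as a minimal sketch in
plain kernel C (the struct and function names below are illustrative
only, not the ice driver's):

/* Sketch of the pattern; "demo_ring"/"demo_*" are made-up names. */
#include <linux/compiler.h>	/* READ_ONCE()/WRITE_ONCE() */
#include <linux/netdevice.h>	/* synchronize_net() */

struct xsk_buff_pool;

struct demo_ring {
	struct xsk_buff_pool *xsk_pool;	/* written in ndo_bpf, read in NAPI */
};

/* Writer side, process context (ndo_bpf / queue restart path). */
static void demo_update_pool(struct demo_ring *ring,
			     struct xsk_buff_pool *pool)
{
	WRITE_ONCE(ring->xsk_pool, pool);
	/* Wait for in-flight NAPI polls so that every poll running after
	 * this point observes the updated pointer.
	 */
	synchronize_net();
}

/* Reader side, NAPI poll possibly running on a remote CPU. */
static int demo_poll(struct demo_ring *ring, int budget)
{
	/* Snapshot once and use the same pool for the whole poll ... */
	struct xsk_buff_pool *pool = READ_ONCE(ring->xsk_pool);

	if (!pool)
		return 0;
	/* ... passing "pool" down to the ZC Rx/Tx helpers instead of
	 * re-reading ring->xsk_pool in each of them.
	 */
	return budget;
}

synchronize_net() waits for softirq (RCU read-side) sections such as a
running NAPI poll to finish, and passing the snapshot down the call chain
keeps a single poll from mixing old and new pool pointers.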
drivers/net/ethernet/intel/ice/ice.h
drivers/net/ethernet/intel/ice/ice_base.c
drivers/net/ethernet/intel/ice/ice_main.c
drivers/net/ethernet/intel/ice/ice_txrx.c
drivers/net/ethernet/intel/ice/ice_xsk.c
drivers/net/ethernet/intel/ice/ice_xsk.h

diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 99a75a59078ef3e6c71c46696b694d00fd082ea1..caaa10157909ee8bcd01f525ace63b55184b5090 100644
@@ -765,18 +765,17 @@ static inline struct xsk_buff_pool *ice_get_xp_from_qid(struct ice_vsi *vsi,
 }
 
 /**
- * ice_xsk_pool - get XSK buffer pool bound to a ring
+ * ice_rx_xsk_pool - assign XSK buff pool to Rx ring
  * @ring: Rx ring to use
  *
- * Returns a pointer to xsk_buff_pool structure if there is a buffer pool
- * present, NULL otherwise.
+ * Sets XSK buff pool pointer on Rx ring.
  */
-static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
+static inline void ice_rx_xsk_pool(struct ice_rx_ring *ring)
 {
        struct ice_vsi *vsi = ring->vsi;
        u16 qid = ring->q_index;
 
-       return ice_get_xp_from_qid(vsi, qid);
+       WRITE_ONCE(ring->xsk_pool, ice_get_xp_from_qid(vsi, qid));
 }
 
 /**
@@ -801,7 +800,7 @@ static inline void ice_tx_xsk_pool(struct ice_vsi *vsi, u16 qid)
        if (!ring)
                return;
 
-       ring->xsk_pool = ice_get_xp_from_qid(vsi, qid);
+       WRITE_ONCE(ring->xsk_pool, ice_get_xp_from_qid(vsi, qid));
 }
 
 /**
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 5d396c1a7731482f725561a8eff709ecd3cc793e..1facf179a96fd6a5f11ebfa49d47f9d55756a163 100644
@@ -536,7 +536,7 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
                                return err;
                }
 
-               ring->xsk_pool = ice_xsk_pool(ring);
+               ice_rx_xsk_pool(ring);
                if (ring->xsk_pool) {
                        xdp_rxq_info_unreg(&ring->xdp_rxq);
 
@@ -597,7 +597,7 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
                        return 0;
                }
 
-               ok = ice_alloc_rx_bufs_zc(ring, num_bufs);
+               ok = ice_alloc_rx_bufs_zc(ring, ring->xsk_pool, num_bufs);
                if (!ok) {
                        u16 pf_q = ring->vsi->rxq_map[ring->q_index];
 
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index ec636be4d17d1e9b896380ba0223e4b12298b789..3de020020bc42a1c297248b14c7437332bdb94b3 100644
@@ -2948,7 +2948,7 @@ static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
        ice_for_each_rxq(vsi, i) {
                struct ice_rx_ring *rx_ring = vsi->rx_rings[i];
 
-               if (rx_ring->xsk_pool)
+               if (READ_ONCE(rx_ring->xsk_pool))
                        napi_schedule(&rx_ring->q_vector->napi);
        }
 }
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 8bb743f78fcb4771d852293dfcbbd3b28dbf3726..0f91e916742772ab5fdfc50ae4d7f1b1a3da55b0 100644
@@ -1521,10 +1521,11 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
         * budget and be more aggressive about cleaning up the Tx descriptors.
         */
        ice_for_each_tx_ring(tx_ring, q_vector->tx) {
+               struct xsk_buff_pool *xsk_pool = READ_ONCE(tx_ring->xsk_pool);
                bool wd;
 
-               if (tx_ring->xsk_pool)
-                       wd = ice_xmit_zc(tx_ring);
+               if (xsk_pool)
+                       wd = ice_xmit_zc(tx_ring, xsk_pool);
                else if (ice_ring_is_xdp(tx_ring))
                        wd = true;
                else
@@ -1550,6 +1551,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
                budget_per_ring = budget;
 
        ice_for_each_rx_ring(rx_ring, q_vector->rx) {
+               struct xsk_buff_pool *xsk_pool = READ_ONCE(rx_ring->xsk_pool);
                int cleaned;
 
                /* A dedicated path for zero-copy allows making a single
@@ -1557,7 +1559,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
                 * ice_clean_rx_irq function and makes the codebase cleaner.
                 */
                cleaned = rx_ring->xsk_pool ?
-                         ice_clean_rx_irq_zc(rx_ring, budget_per_ring) :
+                         ice_clean_rx_irq_zc(rx_ring, xsk_pool, budget_per_ring) :
                          ice_clean_rx_irq(rx_ring, budget_per_ring);
                work_done += cleaned;
                /* if we clean as many as budgeted, we must not be done */
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 3fbe4cfadfbfaa4864e530cb0f125497ba48f5d8..ee084ad80a61386a16fdad29cfe2275eae1ddab4 100644
@@ -250,6 +250,8 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
        ice_qvec_toggle_napi(vsi, q_vector, true);
        ice_qvec_ena_irq(vsi, q_vector);
 
+       /* make sure NAPI sees updated ice_{t,r}x_ring::xsk_pool */
+       synchronize_net();
        ice_get_link_status(vsi->port_info, &link_up);
        if (link_up) {
                netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
@@ -464,6 +466,7 @@ static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
 /**
  * __ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
  * @rx_ring: Rx ring
+ * @xsk_pool: XSK buffer pool to pick buffers to be filled by HW
  * @count: The number of buffers to allocate
  *
  * Place the @count of descriptors onto Rx ring. Handle the ring wrap
@@ -472,7 +475,8 @@ static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
  *
  * Returns true if all allocations were successful, false if any fail.
  */
-static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
+static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring,
+                                  struct xsk_buff_pool *xsk_pool, u16 count)
 {
        u32 nb_buffs_extra = 0, nb_buffs = 0;
        union ice_32b_rx_flex_desc *rx_desc;
@@ -484,8 +488,7 @@ static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
        xdp = ice_xdp_buf(rx_ring, ntu);
 
        if (ntu + count >= rx_ring->count) {
-               nb_buffs_extra = ice_fill_rx_descs(rx_ring->xsk_pool, xdp,
-                                                  rx_desc,
+               nb_buffs_extra = ice_fill_rx_descs(xsk_pool, xdp, rx_desc,
                                                   rx_ring->count - ntu);
                if (nb_buffs_extra != rx_ring->count - ntu) {
                        ntu += nb_buffs_extra;
@@ -498,7 +501,7 @@ static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
                ice_release_rx_desc(rx_ring, 0);
        }
 
-       nb_buffs = ice_fill_rx_descs(rx_ring->xsk_pool, xdp, rx_desc, count);
+       nb_buffs = ice_fill_rx_descs(xsk_pool, xdp, rx_desc, count);
 
        ntu += nb_buffs;
        if (ntu == rx_ring->count)
@@ -514,6 +517,7 @@ exit:
 /**
  * ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
  * @rx_ring: Rx ring
+ * @xsk_pool: XSK buffer pool to pick buffers to be filled by HW
  * @count: The number of buffers to allocate
  *
  * Wrapper for internal allocation routine; figure out how many tail
@@ -521,7 +525,8 @@ exit:
  *
  * Returns true if all calls to internal alloc routine succeeded
  */
-bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
+bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring,
+                         struct xsk_buff_pool *xsk_pool, u16 count)
 {
        u16 rx_thresh = ICE_RING_QUARTER(rx_ring);
        u16 leftover, i, tail_bumps;
@@ -530,9 +535,9 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
        leftover = count - (tail_bumps * rx_thresh);
 
        for (i = 0; i < tail_bumps; i++)
-               if (!__ice_alloc_rx_bufs_zc(rx_ring, rx_thresh))
+               if (!__ice_alloc_rx_bufs_zc(rx_ring, xsk_pool, rx_thresh))
                        return false;
-       return __ice_alloc_rx_bufs_zc(rx_ring, leftover);
+       return __ice_alloc_rx_bufs_zc(rx_ring, xsk_pool, leftover);
 }
 
 /**
@@ -601,8 +606,10 @@ out:
 /**
  * ice_clean_xdp_irq_zc - produce AF_XDP descriptors to CQ
  * @xdp_ring: XDP Tx ring
+ * @xsk_pool: AF_XDP buffer pool pointer
  */
-static u32 ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
+static u32 ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring,
+                               struct xsk_buff_pool *xsk_pool)
 {
        u16 ntc = xdp_ring->next_to_clean;
        struct ice_tx_desc *tx_desc;
@@ -653,7 +660,7 @@ skip:
        if (xdp_ring->next_to_clean >= cnt)
                xdp_ring->next_to_clean -= cnt;
        if (xsk_frames)
-               xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
+               xsk_tx_completed(xsk_pool, xsk_frames);
 
        return completed_frames;
 }
@@ -662,6 +669,7 @@ skip:
  * ice_xmit_xdp_tx_zc - AF_XDP ZC handler for XDP_TX
  * @xdp: XDP buffer to xmit
  * @xdp_ring: XDP ring to produce descriptor onto
+ * @xsk_pool: AF_XDP buffer pool pointer
  *
  * note that this function works directly on xdp_buff, no need to convert
  * it to xdp_frame. xdp_buff pointer is stored to ice_tx_buf so that cleaning
@@ -671,7 +679,8 @@ skip:
  * was not enough space on XDP ring
  */
 static int ice_xmit_xdp_tx_zc(struct xdp_buff *xdp,
-                             struct ice_tx_ring *xdp_ring)
+                             struct ice_tx_ring *xdp_ring,
+                             struct xsk_buff_pool *xsk_pool)
 {
        struct skb_shared_info *sinfo = NULL;
        u32 size = xdp->data_end - xdp->data;
@@ -685,7 +694,7 @@ static int ice_xmit_xdp_tx_zc(struct xdp_buff *xdp,
 
        free_space = ICE_DESC_UNUSED(xdp_ring);
        if (free_space < ICE_RING_QUARTER(xdp_ring))
-               free_space += ice_clean_xdp_irq_zc(xdp_ring);
+               free_space += ice_clean_xdp_irq_zc(xdp_ring, xsk_pool);
 
        if (unlikely(!free_space))
                goto busy;
@@ -705,7 +714,7 @@ static int ice_xmit_xdp_tx_zc(struct xdp_buff *xdp,
                dma_addr_t dma;
 
                dma = xsk_buff_xdp_get_dma(xdp);
-               xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, size);
+               xsk_buff_raw_dma_sync_for_device(xsk_pool, dma, size);
 
                tx_buf->xdp = xdp;
                tx_buf->type = ICE_TX_BUF_XSK_TX;
@@ -747,12 +756,14 @@ busy:
  * @xdp: xdp_buff used as input to the XDP program
  * @xdp_prog: XDP program to run
  * @xdp_ring: ring to be used for XDP_TX action
+ * @xsk_pool: AF_XDP buffer pool pointer
  *
  * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
  */
 static int
 ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
-              struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring)
+              struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring,
+              struct xsk_buff_pool *xsk_pool)
 {
        int err, result = ICE_XDP_PASS;
        u32 act;
@@ -763,7 +774,7 @@ ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
                err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
                if (!err)
                        return ICE_XDP_REDIR;
-               if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)
+               if (xsk_uses_need_wakeup(xsk_pool) && err == -ENOBUFS)
                        result = ICE_XDP_EXIT;
                else
                        result = ICE_XDP_CONSUMED;
@@ -774,7 +785,7 @@ ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
        case XDP_PASS:
                break;
        case XDP_TX:
-               result = ice_xmit_xdp_tx_zc(xdp, xdp_ring);
+               result = ice_xmit_xdp_tx_zc(xdp, xdp_ring, xsk_pool);
                if (result == ICE_XDP_CONSUMED)
                        goto out_failure;
                break;
@@ -826,14 +837,16 @@ ice_add_xsk_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *first,
 /**
  * ice_clean_rx_irq_zc - consumes packets from the hardware ring
  * @rx_ring: AF_XDP Rx ring
+ * @xsk_pool: AF_XDP buffer pool pointer
  * @budget: NAPI budget
  *
  * Returns number of processed packets on success, remaining budget on failure.
  */
-int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
+int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring,
+                       struct xsk_buff_pool *xsk_pool,
+                       int budget)
 {
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
-       struct xsk_buff_pool *xsk_pool = rx_ring->xsk_pool;
        u32 ntc = rx_ring->next_to_clean;
        u32 ntu = rx_ring->next_to_use;
        struct xdp_buff *first = NULL;
@@ -896,7 +909,8 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
                if (ice_is_non_eop(rx_ring, rx_desc))
                        continue;
 
-               xdp_res = ice_run_xdp_zc(rx_ring, first, xdp_prog, xdp_ring);
+               xdp_res = ice_run_xdp_zc(rx_ring, first, xdp_prog, xdp_ring,
+                                        xsk_pool);
                if (likely(xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))) {
                        xdp_xmit |= xdp_res;
                } else if (xdp_res == ICE_XDP_EXIT) {
@@ -945,7 +959,8 @@ construct_skb:
        rx_ring->next_to_clean = ntc;
        entries_to_alloc = ICE_RX_DESC_UNUSED(rx_ring);
        if (entries_to_alloc > ICE_RING_QUARTER(rx_ring))
-               failure |= !ice_alloc_rx_bufs_zc(rx_ring, entries_to_alloc);
+               failure |= !ice_alloc_rx_bufs_zc(rx_ring, xsk_pool,
+                                                entries_to_alloc);
 
        ice_finalize_xdp_rx(xdp_ring, xdp_xmit, 0);
        ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
@@ -968,17 +983,19 @@ construct_skb:
 /**
  * ice_xmit_pkt - produce a single HW Tx descriptor out of AF_XDP descriptor
  * @xdp_ring: XDP ring to produce the HW Tx descriptor on
+ * @xsk_pool: XSK buffer pool to pick buffers to be consumed by HW
  * @desc: AF_XDP descriptor to pull the DMA address and length from
  * @total_bytes: bytes accumulator that will be used for stats update
  */
-static void ice_xmit_pkt(struct ice_tx_ring *xdp_ring, struct xdp_desc *desc,
+static void ice_xmit_pkt(struct ice_tx_ring *xdp_ring,
+                        struct xsk_buff_pool *xsk_pool, struct xdp_desc *desc,
                         unsigned int *total_bytes)
 {
        struct ice_tx_desc *tx_desc;
        dma_addr_t dma;
 
-       dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc->addr);
-       xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc->len);
+       dma = xsk_buff_raw_get_dma(xsk_pool, desc->addr);
+       xsk_buff_raw_dma_sync_for_device(xsk_pool, dma, desc->len);
 
        tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use++);
        tx_desc->buf_addr = cpu_to_le64(dma);
@@ -991,10 +1008,13 @@ static void ice_xmit_pkt(struct ice_tx_ring *xdp_ring, struct xdp_desc *desc,
 /**
  * ice_xmit_pkt_batch - produce a batch of HW Tx descriptors out of AF_XDP descriptors
  * @xdp_ring: XDP ring to produce the HW Tx descriptors on
+ * @xsk_pool: XSK buffer pool to pick buffers to be consumed by HW
  * @descs: AF_XDP descriptors to pull the DMA addresses and lengths from
  * @total_bytes: bytes accumulator that will be used for stats update
  */
-static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
+static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring,
+                              struct xsk_buff_pool *xsk_pool,
+                              struct xdp_desc *descs,
                               unsigned int *total_bytes)
 {
        u16 ntu = xdp_ring->next_to_use;
@@ -1004,8 +1024,8 @@ static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *de
        loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) {
                dma_addr_t dma;
 
-               dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, descs[i].addr);
-               xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, descs[i].len);
+               dma = xsk_buff_raw_get_dma(xsk_pool, descs[i].addr);
+               xsk_buff_raw_dma_sync_for_device(xsk_pool, dma, descs[i].len);
 
                tx_desc = ICE_TX_DESC(xdp_ring, ntu++);
                tx_desc->buf_addr = cpu_to_le64(dma);
@@ -1021,37 +1041,41 @@ static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *de
 /**
  * ice_fill_tx_hw_ring - produce the number of Tx descriptors onto ring
  * @xdp_ring: XDP ring to produce the HW Tx descriptors on
+ * @xsk_pool: XSK buffer pool to pick buffers to be consumed by HW
  * @descs: AF_XDP descriptors to pull the DMA addresses and lengths from
  * @nb_pkts: count of packets to be send
  * @total_bytes: bytes accumulator that will be used for stats update
  */
-static void ice_fill_tx_hw_ring(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
-                               u32 nb_pkts, unsigned int *total_bytes)
+static void ice_fill_tx_hw_ring(struct ice_tx_ring *xdp_ring,
+                               struct xsk_buff_pool *xsk_pool,
+                               struct xdp_desc *descs, u32 nb_pkts,
+                               unsigned int *total_bytes)
 {
        u32 batched, leftover, i;
 
        batched = ALIGN_DOWN(nb_pkts, PKTS_PER_BATCH);
        leftover = nb_pkts & (PKTS_PER_BATCH - 1);
        for (i = 0; i < batched; i += PKTS_PER_BATCH)
-               ice_xmit_pkt_batch(xdp_ring, &descs[i], total_bytes);
+               ice_xmit_pkt_batch(xdp_ring, xsk_pool, &descs[i], total_bytes);
        for (; i < batched + leftover; i++)
-               ice_xmit_pkt(xdp_ring, &descs[i], total_bytes);
+               ice_xmit_pkt(xdp_ring, xsk_pool, &descs[i], total_bytes);
 }
 
 /**
  * ice_xmit_zc - take entries from XSK Tx ring and place them onto HW Tx ring
  * @xdp_ring: XDP ring to produce the HW Tx descriptors on
+ * @xsk_pool: AF_XDP buffer pool pointer
  *
  * Returns true if there is no more work that needs to be done, false otherwise
  */
-bool ice_xmit_zc(struct ice_tx_ring *xdp_ring)
+bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, struct xsk_buff_pool *xsk_pool)
 {
-       struct xdp_desc *descs = xdp_ring->xsk_pool->tx_descs;
+       struct xdp_desc *descs = xsk_pool->tx_descs;
        u32 nb_pkts, nb_processed = 0;
        unsigned int total_bytes = 0;
        int budget;
 
-       ice_clean_xdp_irq_zc(xdp_ring);
+       ice_clean_xdp_irq_zc(xdp_ring, xsk_pool);
 
        if (!netif_carrier_ok(xdp_ring->vsi->netdev) ||
            !netif_running(xdp_ring->vsi->netdev))
@@ -1060,25 +1084,26 @@ bool ice_xmit_zc(struct ice_tx_ring *xdp_ring)
        budget = ICE_DESC_UNUSED(xdp_ring);
        budget = min_t(u16, budget, ICE_RING_QUARTER(xdp_ring));
 
-       nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, budget);
+       nb_pkts = xsk_tx_peek_release_desc_batch(xsk_pool, budget);
        if (!nb_pkts)
                return true;
 
        if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {
                nb_processed = xdp_ring->count - xdp_ring->next_to_use;
-               ice_fill_tx_hw_ring(xdp_ring, descs, nb_processed, &total_bytes);
+               ice_fill_tx_hw_ring(xdp_ring, xsk_pool, descs, nb_processed,
+                                   &total_bytes);
                xdp_ring->next_to_use = 0;
        }
 
-       ice_fill_tx_hw_ring(xdp_ring, &descs[nb_processed], nb_pkts - nb_processed,
-                           &total_bytes);
+       ice_fill_tx_hw_ring(xdp_ring, xsk_pool, &descs[nb_processed],
+                           nb_pkts - nb_processed, &total_bytes);
 
        ice_set_rs_bit(xdp_ring);
        ice_xdp_ring_update_tail(xdp_ring);
        ice_update_tx_ring_stats(xdp_ring, nb_pkts, total_bytes);
 
-       if (xsk_uses_need_wakeup(xdp_ring->xsk_pool))
-               xsk_set_tx_need_wakeup(xdp_ring->xsk_pool);
+       if (xsk_uses_need_wakeup(xsk_pool))
+               xsk_set_tx_need_wakeup(xsk_pool);
 
        return nb_pkts < budget;
 }
@@ -1111,7 +1136,7 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
 
        ring = vsi->rx_rings[queue_id]->xdp_ring;
 
-       if (!ring->xsk_pool)
+       if (!READ_ONCE(ring->xsk_pool))
                return -EINVAL;
 
        /* The idea here is that if NAPI is running, mark a miss, so
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h
index 6fa181f080ef139f94ae1faee9c3442c0e0371b9..45adeb513253a0d12e11251c4070855451a7ed44 100644
@@ -20,16 +20,20 @@ struct ice_vsi;
 #ifdef CONFIG_XDP_SOCKETS
 int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool,
                       u16 qid);
-int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget);
+int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring,
+                       struct xsk_buff_pool *xsk_pool,
+                       int budget);
 int ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags);
-bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count);
+bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring,
+                         struct xsk_buff_pool *xsk_pool, u16 count);
 bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi);
 void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring);
 void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring);
-bool ice_xmit_zc(struct ice_tx_ring *xdp_ring);
+bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, struct xsk_buff_pool *xsk_pool);
 int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc);
 #else
-static inline bool ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring)
+static inline bool ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring,
+                              struct xsk_buff_pool __always_unused *xsk_pool)
 {
        return false;
 }
@@ -44,6 +48,7 @@ ice_xsk_pool_setup(struct ice_vsi __always_unused *vsi,
 
 static inline int
 ice_clean_rx_irq_zc(struct ice_rx_ring __always_unused *rx_ring,
+                   struct xsk_buff_pool __always_unused *xsk_pool,
                    int __always_unused budget)
 {
        return 0;
@@ -51,6 +56,7 @@ ice_clean_rx_irq_zc(struct ice_rx_ring __always_unused *rx_ring,
 
 static inline bool
 ice_alloc_rx_bufs_zc(struct ice_rx_ring __always_unused *rx_ring,
+                    struct xsk_buff_pool __always_unused *xsk_pool,
                     u16 __always_unused count)
 {
        return false;