page_pool: check for DMA sync shortcut earlier
author Alexander Lobakin <aleksander.lobakin@intel.com>
Tue, 7 May 2024 11:20:25 +0000 (13:20 +0200)
committer Christoph Hellwig <hch@lst.de>
Wed, 8 May 2024 06:51:20 +0000 (08:51 +0200)
We can save a couple more function calls in the Page Pool code if we
check for dma_need_sync() earlier, right where we test pool->dma_sync.
Move both checks into an inline wrapper and call the PP helper that
wraps the generic DMA sync function only when both are true.
You can't cache the result of dma_need_sync() in &page_pool, as it may
change at any time, e.g. when a SWIOTLB buffer is allocated or mapped.

Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
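
The shape of this change is a common pattern: hoist the cheap predicate
checks into an always-inlined wrapper so the out-of-line sync path is
entered only when it will actually do work. Below is a minimal,
self-contained C sketch of that pattern; every name in it (struct pool,
device_needs_sync(), pool_sync_for_device(), __pool_sync_slowpath())
is an illustrative stand-in, not the kernel API.

/* sketch.c - illustrative only; build with: cc -O2 -o sketch sketch.c */
#include <stdbool.h>
#include <stdio.h>

struct pool {
	bool sync_enabled;	/* fixed once at pool creation */
};

/*
 * Stand-in for the per-call predicate. In the kernel this role is
 * played by dma_dev_need_sync(), whose answer can change at runtime
 * (e.g. once a SWIOTLB bounce buffer is mapped), which is why it is
 * re-evaluated on every call instead of being cached in the pool.
 */
static bool device_needs_sync(void)
{
	return true;
}

/* Out-of-line slow path: reached only when both predicates hold. */
static void __pool_sync_slowpath(const struct pool *p, unsigned int len)
{
	(void)p;
	printf("syncing %u bytes for device\n", len);
}

/*
 * The wrapper: both cheap tests happen inline at the call site, so a
 * caller that previously did "if (p->sync_enabled) slow_path();"
 * now pays no function call at all when no sync is required.
 */
static inline void pool_sync_for_device(const struct pool *p,
					unsigned int len)
{
	if (p->sync_enabled && device_needs_sync())
		__pool_sync_slowpath(p, len);
}

int main(void)
{
	struct pool p = { .sync_enabled = true };

	pool_sync_for_device(&p, 64);
	return 0;
}

Presumably the #if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
guard in the patch serves the same end: when syncing can never be
required, the wrapper's test is constant-false and the compiler can
discard the slow path entirely.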
net/core/page_pool.c

index c2819ff03dd25c23a59919ea20a044b0f9753df6..4f9d1bd7f4d187778480fa1f388fbbf428584473 100644
@@ -398,16 +398,26 @@ static struct page *__page_pool_get_cached(struct page_pool *pool)
        return page;
 }
 
-static void page_pool_dma_sync_for_device(struct page_pool *pool,
-                                         struct page *page,
-                                         unsigned int dma_sync_size)
+static void __page_pool_dma_sync_for_device(const struct page_pool *pool,
+                                           struct page *page,
+                                           u32 dma_sync_size)
 {
+#if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
        dma_addr_t dma_addr = page_pool_get_dma_addr(page);
 
        dma_sync_size = min(dma_sync_size, pool->p.max_len);
-       dma_sync_single_range_for_device(pool->p.dev, dma_addr,
-                                        pool->p.offset, dma_sync_size,
-                                        pool->p.dma_dir);
+       __dma_sync_single_for_device(pool->p.dev, dma_addr + pool->p.offset,
+                                    dma_sync_size, pool->p.dma_dir);
+#endif
+}
+
+static __always_inline void
+page_pool_dma_sync_for_device(const struct page_pool *pool,
+                             struct page *page,
+                             u32 dma_sync_size)
+{
+       if (pool->dma_sync && dma_dev_need_sync(pool->p.dev))
+               __page_pool_dma_sync_for_device(pool, page, dma_sync_size);
 }
 
 static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
@@ -429,8 +439,7 @@ static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
        if (page_pool_set_dma_addr(page, dma))
                goto unmap_failed;
 
-       if (pool->dma_sync)
-               page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
+       page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
 
        return true;
 
@@ -699,9 +708,7 @@ __page_pool_put_page(struct page_pool *pool, struct page *page,
        if (likely(__page_pool_page_can_be_recycled(page))) {
                /* Read barrier done in page_ref_count / READ_ONCE */
 
-               if (pool->dma_sync)
-                       page_pool_dma_sync_for_device(pool, page,
-                                                     dma_sync_size);
+               page_pool_dma_sync_for_device(pool, page, dma_sync_size);
 
                if (allow_direct && in_softirq() &&
                    page_pool_recycle_in_cache(page, pool))
@@ -812,9 +819,7 @@ static struct page *page_pool_drain_frag(struct page_pool *pool,
                return NULL;
 
        if (__page_pool_page_can_be_recycled(page)) {
-               if (pool->dma_sync)
-                       page_pool_dma_sync_for_device(pool, page, -1);
-
+               page_pool_dma_sync_for_device(pool, page, -1);
                return page;
        }
 