drm/ttm: stop pooling cached NUMA pages v2
author     Christian König <ckoenig.leichtzumerken@gmail.com>
           Mon, 15 Apr 2024 13:48:21 +0000 (15:48 +0200)
committer  Christian König <christian.koenig@amd.com>
           Mon, 15 Apr 2024 14:57:41 +0000 (16:57 +0200)
We only pool write-combined and uncached allocations because they
incur extra overhead on allocation and release.
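
On x86 that extra overhead is the page attribute rewrite. A minimal
sketch of what makes such pages expensive, using the real x86
set_pages_array_wc()/set_pages_array_wb() helpers; the sketch_* wrappers
are hypothetical and the error handling is simplified compared to what
ttm_pool.c actually does:

	#include <linux/gfp.h>
	#include <asm/set_memory.h>

	/* Sketch only: allocate one page and switch it to write-combined. */
	static struct page *sketch_alloc_wc_page(gfp_t gfp)
	{
		struct page *p = alloc_pages(gfp, 0);

		/* set_pages_array_wc() rewrites attributes and flushes TLBs */
		if (p && set_pages_array_wc(&p, 1)) {
			__free_pages(p, 0);
			return NULL;
		}
		return p;
	}

	/* Sketch only: the attributes must be restored on release, too. */
	static void sketch_free_wc_page(struct page *p)
	{
		set_pages_array_wb(&p, 1);
		__free_pages(p, 0);
	}

Pooling amortizes those two transitions across re-uses, which is why it
pays off for write-combined and uncached pages but not for cached ones.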

If we also pool cached NUMA allocations it not only adds unnecessary
overhead, but under memory pressure pages from the wrong NUMA node
can enter the pool and then be re-used over and over again.

This can lead to reduced performance after the system has run into
memory pressure.
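
With this change, ttm_pool_select_type() returns NULL for cached
allocations on non-DMA pools, and the callers already treat NULL as
"do not pool". A hedged sketch of that caller pattern, simplified from
the alloc/free paths in ttm_pool.c:

	pt = ttm_pool_select_type(pool, page_caching, order);
	if (pt)
		ttm_pool_type_give(pt, p);	/* recycle through the pool */
	else
		ttm_pool_free_page(pool, page_caching, order, p);

So cached NUMA pages now go straight back to the page allocator instead
of being recycled on a possibly wrong node.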

v2: restructure and clean up the code a bit compared to the internal
    hack used to test this.

Signed-off-by: Christian König <christian.koenig@amd.com>
Fixes: 4482d3c94d7f ("drm/ttm: add NUMA node id to the pool")
CC: stable@vger.kernel.org
Reviewed-by: Felix Kuehling <felix.kuehling@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240415134821.1919-1-christian.koenig@amd.com
drivers/gpu/drm/ttm/ttm_pool.c

index 112438d965ffbefd4fa2cce5f246cc03a63759f9..6e1fd6985ffcb730eb7057c4509aec971dfa8266 100644
@@ -288,17 +288,23 @@ static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
                                                  enum ttm_caching caching,
                                                  unsigned int order)
 {
-       if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE)
+       if (pool->use_dma_alloc)
                return &pool->caching[caching].orders[order];
 
 #ifdef CONFIG_X86
        switch (caching) {
        case ttm_write_combined:
+               if (pool->nid != NUMA_NO_NODE)
+                       return &pool->caching[caching].orders[order];
+
                if (pool->use_dma32)
                        return &global_dma32_write_combined[order];
 
                return &global_write_combined[order];
        case ttm_uncached:
+               if (pool->nid != NUMA_NO_NODE)
+                       return &pool->caching[caching].orders[order];
+
                if (pool->use_dma32)
                        return &global_dma32_uncached[order];
 
@@ -566,11 +572,17 @@ void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
        pool->use_dma_alloc = use_dma_alloc;
        pool->use_dma32 = use_dma32;
 
-       if (use_dma_alloc || nid != NUMA_NO_NODE) {
-               for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
-                       for (j = 0; j < NR_PAGE_ORDERS; ++j)
-                               ttm_pool_type_init(&pool->caching[i].orders[j],
-                                                  pool, i, j);
+       for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
+               for (j = 0; j < NR_PAGE_ORDERS; ++j) {
+                       struct ttm_pool_type *pt;
+
+                       /* Initialize only pool types which are actually used */
+                       pt = ttm_pool_select_type(pool, i, j);
+                       if (pt != &pool->caching[i].orders[j])
+                               continue;
+
+                       ttm_pool_type_init(pt, pool, i, j);
+               }
        }
 }
 EXPORT_SYMBOL(ttm_pool_init);
@@ -599,10 +611,16 @@ void ttm_pool_fini(struct ttm_pool *pool)
 {
        unsigned int i, j;
 
-       if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE) {
-               for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
-                       for (j = 0; j < NR_PAGE_ORDERS; ++j)
-                               ttm_pool_type_fini(&pool->caching[i].orders[j]);
+       for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
+               for (j = 0; j < NR_PAGE_ORDERS; ++j) {
+                       struct ttm_pool_type *pt;
+
+                       pt = ttm_pool_select_type(pool, i, j);
+                       if (pt != &pool->caching[i].orders[j])
+                               continue;
+
+                       ttm_pool_type_fini(pt);
+               }
        }
 
        /* We removed the pool types from the LRU, but we need to also make sure