git.dujemihanovic.xyz Git - linux.git/commitdiff
mm: HUGE_VMAP arch support cleanup
author Nicholas Piggin <npiggin@gmail.com>
Fri, 30 Apr 2021 05:58:26 +0000 (22:58 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 30 Apr 2021 18:20:40 +0000 (11:20 -0700)
This changes the awkward approach where architectures provide init
functions to determine which levels they can provide large mappings for,
to one where the arch is queried for each call.

This removes code and indirection, and allows constant-folding of dead
code for unsupported levels.

This also adds a prot argument to the arch query.  This is unused
currently but could help with some architectures (e.g., some powerpc
processors can't map uncacheable memory with large pages).

Link: https://lkml.kernel.org/r/20210317062402.533919-7-npiggin@gmail.com
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Reviewed-by: Ding Tianhong <dingtianhong@huawei.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com> [arm64]
Cc: Will Deacon <will@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Uladzislau Rezki (Sony) <urezki@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
arch/arm64/include/asm/vmalloc.h
arch/arm64/mm/mmu.c
arch/powerpc/include/asm/vmalloc.h
arch/powerpc/mm/book3s64/radix_pgtable.c
arch/x86/include/asm/vmalloc.h
arch/x86/mm/ioremap.c
include/linux/io.h
include/linux/vmalloc.h
init/main.c
mm/debug_vm_pgtable.c
mm/ioremap.c

index 2ca708ab9b20b0c647c6839d682f5de93cf1541c..597b40405319d8da1761a64d9644400745c2029a 100644 (file)
@@ -1,4 +1,12 @@
 #ifndef _ASM_ARM64_VMALLOC_H
 #define _ASM_ARM64_VMALLOC_H
 
+#include <asm/page.h>
+
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+bool arch_vmap_p4d_supported(pgprot_t prot);
+bool arch_vmap_pud_supported(pgprot_t prot);
+bool arch_vmap_pmd_supported(pgprot_t prot);
+#endif
+
 #endif /* _ASM_ARM64_VMALLOC_H */
index d563335ad43f5ef21d3ac6579a4989ac770687d3..8436e0755361ab6c89416498700289d7f48022b0 100644 (file)
@@ -1339,12 +1339,12 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
        return dt_virt;
 }
 
-int __init arch_ioremap_p4d_supported(void)
+bool arch_vmap_p4d_supported(pgprot_t prot)
 {
-       return 0;
+       return false;
 }
 
-int __init arch_ioremap_pud_supported(void)
+bool arch_vmap_pud_supported(pgprot_t prot)
 {
        /*
         * Only 4k granule supports level 1 block mappings.
@@ -1354,9 +1354,9 @@ int __init arch_ioremap_pud_supported(void)
               !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS);
 }
 
-int __init arch_ioremap_pmd_supported(void)
+bool arch_vmap_pmd_supported(pgprot_t prot)
 {
-       /* See arch_ioremap_pud_supported() */
+       /* See arch_vmap_pud_supported() */
        return !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS);
 }
 
index b992dfaaa161814dbbdd184c7b5a31e7b95758c6..105abb73f075cf0204f147c60b9659658a60a805 100644 (file)
@@ -1,4 +1,12 @@
 #ifndef _ASM_POWERPC_VMALLOC_H
 #define _ASM_POWERPC_VMALLOC_H
 
+#include <asm/page.h>
+
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+bool arch_vmap_p4d_supported(pgprot_t prot);
+bool arch_vmap_pud_supported(pgprot_t prot);
+bool arch_vmap_pmd_supported(pgprot_t prot);
+#endif
+
 #endif /* _ASM_POWERPC_VMALLOC_H */
index 98f0b243c1ab21e0953645dae57a5bf33b8ac80b..743807fc210f3a2618c207e949e8afdebd6c9d36 100644 (file)
@@ -1082,13 +1082,13 @@ void radix__ptep_modify_prot_commit(struct vm_area_struct *vma,
        set_pte_at(mm, addr, ptep, pte);
 }
 
-int __init arch_ioremap_pud_supported(void)
+bool arch_vmap_pud_supported(pgprot_t prot)
 {
        /* HPT does not cope with large pages in the vmalloc area */
        return radix_enabled();
 }
 
-int __init arch_ioremap_pmd_supported(void)
+bool arch_vmap_pmd_supported(pgprot_t prot)
 {
        return radix_enabled();
 }
@@ -1182,7 +1182,7 @@ int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
        return 1;
 }
 
-int __init arch_ioremap_p4d_supported(void)
+bool arch_vmap_p4d_supported(pgprot_t prot)
 {
-       return 0;
+       return false;
 }
index 29837740b52092e5c406157e34bde4ccb5b0913d..094ea2b565f37cbd3b9ff7bd038fec5f471c22f6 100644 (file)
@@ -1,6 +1,13 @@
 #ifndef _ASM_X86_VMALLOC_H
 #define _ASM_X86_VMALLOC_H
 
+#include <asm/page.h>
 #include <asm/pgtable_areas.h>
 
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+bool arch_vmap_p4d_supported(pgprot_t prot);
+bool arch_vmap_pud_supported(pgprot_t prot);
+bool arch_vmap_pmd_supported(pgprot_t prot);
+#endif
+
 #endif /* _ASM_X86_VMALLOC_H */
index 9e5ccc56f8e0775e5a3fda8661d44cd5d8f7a760..fbaf0c447986067e7771580ffdd43fdf2e0073e8 100644 (file)
@@ -481,24 +481,26 @@ void iounmap(volatile void __iomem *addr)
 }
 EXPORT_SYMBOL(iounmap);
 
-int __init arch_ioremap_p4d_supported(void)
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+bool arch_vmap_p4d_supported(pgprot_t prot)
 {
-       return 0;
+       return false;
 }
 
-int __init arch_ioremap_pud_supported(void)
+bool arch_vmap_pud_supported(pgprot_t prot)
 {
 #ifdef CONFIG_X86_64
        return boot_cpu_has(X86_FEATURE_GBPAGES);
 #else
-       return 0;
+       return false;
 #endif
 }
 
-int __init arch_ioremap_pmd_supported(void)
+bool arch_vmap_pmd_supported(pgprot_t prot)
 {
        return boot_cpu_has(X86_FEATURE_PSE);
 }
+#endif
 
 /*
  * Convert a physical pointer to a virtual kernel pointer for /dev/mem
index 61ff7d6278b694e870e46b946c0f79d95d9d9b14..9595151d800d6719112a9a6877bc10537f2fc796 100644 (file)
@@ -31,15 +31,6 @@ static inline int ioremap_page_range(unsigned long addr, unsigned long end,
 }
 #endif
 
-#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
-void __init ioremap_huge_init(void);
-int arch_ioremap_p4d_supported(void);
-int arch_ioremap_pud_supported(void);
-int arch_ioremap_pmd_supported(void);
-#else
-static inline void ioremap_huge_init(void) { }
-#endif
-
 /*
  * Managed iomap interface
  */
index 3de7be6dd17cd955abda5f82b45603eada21ee2c..358c51c702c045860de12f2087e389f54da8ae26 100644 (file)
@@ -78,6 +78,12 @@ struct vmap_area {
        };
 };
 
+#ifndef CONFIG_HAVE_ARCH_HUGE_VMAP
+static inline bool arch_vmap_p4d_supported(pgprot_t prot) { return false; }
+static inline bool arch_vmap_pud_supported(pgprot_t prot) { return false; }
+static inline bool arch_vmap_pmd_supported(pgprot_t prot) { return false; }
+#endif
+
 /*
  *     Highlevel APIs for driver use
  */
index f498aac26e8cbedf1178a39cee3cc043c8409729..ae96c79ad2d3b6695d7176b0fe932f28b9e5024b 100644 (file)
@@ -837,7 +837,6 @@ static void __init mm_init(void)
        pgtable_init();
        debug_objects_mem_init();
        vmalloc_init();
-       ioremap_huge_init();
        /* Should be run before the first non-init thread is created */
        init_espfix_bsp();
        /* Should be run after espfix64 is set up. */
index a9bd6ce1ba02b3f60a76bc67381d85a9f0743c1e..05efe98a9ac2c8c514ae7ea46db1e617cecd5251 100644 (file)
@@ -247,7 +247,7 @@ static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot)
 {
        pmd_t pmd;
 
-       if (!arch_ioremap_pmd_supported())
+       if (!arch_vmap_pmd_supported(prot))
                return;
 
        pr_debug("Validating PMD huge\n");
@@ -385,7 +385,7 @@ static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot)
 {
        pud_t pud;
 
-       if (!arch_ioremap_pud_supported())
+       if (!arch_vmap_pud_supported(prot))
                return;
 
        pr_debug("Validating PUD huge\n");
index 3f4d36f9745a7878cd53b1d64fcd82cd4ad8b906..3264d0203785d62085bfdb8681d40c2438202f64 100644 (file)
 #include "pgalloc-track.h"
 
 #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
-static int __read_mostly ioremap_p4d_capable;
-static int __read_mostly ioremap_pud_capable;
-static int __read_mostly ioremap_pmd_capable;
-static int __read_mostly ioremap_huge_disabled;
+static unsigned int __ro_after_init iomap_max_page_shift = BITS_PER_LONG - 1;
 
 static int __init set_nohugeiomap(char *str)
 {
-       ioremap_huge_disabled = 1;
+       iomap_max_page_shift = PAGE_SHIFT;
        return 0;
 }
 early_param("nohugeiomap", set_nohugeiomap);
-
-void __init ioremap_huge_init(void)
-{
-       if (!ioremap_huge_disabled) {
-               if (arch_ioremap_p4d_supported())
-                       ioremap_p4d_capable = 1;
-               if (arch_ioremap_pud_supported())
-                       ioremap_pud_capable = 1;
-               if (arch_ioremap_pmd_supported())
-                       ioremap_pmd_capable = 1;
-       }
-}
-
-static inline int ioremap_p4d_enabled(void)
-{
-       return ioremap_p4d_capable;
-}
-
-static inline int ioremap_pud_enabled(void)
-{
-       return ioremap_pud_capable;
-}
-
-static inline int ioremap_pmd_enabled(void)
-{
-       return ioremap_pmd_capable;
-}
-
-#else  /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
-static inline int ioremap_p4d_enabled(void) { return 0; }
-static inline int ioremap_pud_enabled(void) { return 0; }
-static inline int ioremap_pmd_enabled(void) { return 0; }
+#else /* CONFIG_HAVE_ARCH_HUGE_VMAP */
+static const unsigned int iomap_max_page_shift = PAGE_SHIFT;
 #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
 
 static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
@@ -82,9 +49,13 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 }
 
 static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end,
-                       phys_addr_t phys_addr, pgprot_t prot)
+                       phys_addr_t phys_addr, pgprot_t prot,
+                       unsigned int max_page_shift)
 {
-       if (!ioremap_pmd_enabled())
+       if (max_page_shift < PMD_SHIFT)
+               return 0;
+
+       if (!arch_vmap_pmd_supported(prot))
                return 0;
 
        if ((end - addr) != PMD_SIZE)
@@ -104,7 +75,7 @@ static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end,
 
 static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
                        phys_addr_t phys_addr, pgprot_t prot,
-                       pgtbl_mod_mask *mask)
+                       unsigned int max_page_shift, pgtbl_mod_mask *mask)
 {
        pmd_t *pmd;
        unsigned long next;
@@ -115,7 +86,8 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
        do {
                next = pmd_addr_end(addr, end);
 
-               if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot)) {
+               if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot,
+                                       max_page_shift)) {
                        *mask |= PGTBL_PMD_MODIFIED;
                        continue;
                }
@@ -127,9 +99,13 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
 }
 
 static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end,
-                       phys_addr_t phys_addr, pgprot_t prot)
+                       phys_addr_t phys_addr, pgprot_t prot,
+                       unsigned int max_page_shift)
 {
-       if (!ioremap_pud_enabled())
+       if (max_page_shift < PUD_SHIFT)
+               return 0;
+
+       if (!arch_vmap_pud_supported(prot))
                return 0;
 
        if ((end - addr) != PUD_SIZE)
@@ -149,7 +125,7 @@ static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end,
 
 static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
                        phys_addr_t phys_addr, pgprot_t prot,
-                       pgtbl_mod_mask *mask)
+                       unsigned int max_page_shift, pgtbl_mod_mask *mask)
 {
        pud_t *pud;
        unsigned long next;
@@ -160,21 +136,27 @@ static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
        do {
                next = pud_addr_end(addr, end);
 
-               if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot)) {
+               if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot,
+                                       max_page_shift)) {
                        *mask |= PGTBL_PUD_MODIFIED;
                        continue;
                }
 
-               if (vmap_pmd_range(pud, addr, next, phys_addr, prot, mask))
+               if (vmap_pmd_range(pud, addr, next, phys_addr, prot,
+                                       max_page_shift, mask))
                        return -ENOMEM;
        } while (pud++, phys_addr += (next - addr), addr = next, addr != end);
        return 0;
 }
 
 static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end,
-                       phys_addr_t phys_addr, pgprot_t prot)
+                       phys_addr_t phys_addr, pgprot_t prot,
+                       unsigned int max_page_shift)
 {
-       if (!ioremap_p4d_enabled())
+       if (max_page_shift < P4D_SHIFT)
+               return 0;
+
+       if (!arch_vmap_p4d_supported(prot))
                return 0;
 
        if ((end - addr) != P4D_SIZE)
@@ -194,7 +176,7 @@ static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end,
 
 static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
                        phys_addr_t phys_addr, pgprot_t prot,
-                       pgtbl_mod_mask *mask)
+                       unsigned int max_page_shift, pgtbl_mod_mask *mask)
 {
        p4d_t *p4d;
        unsigned long next;
@@ -205,19 +187,22 @@ static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
        do {
                next = p4d_addr_end(addr, end);
 
-               if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot)) {
+               if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot,
+                                       max_page_shift)) {
                        *mask |= PGTBL_P4D_MODIFIED;
                        continue;
                }
 
-               if (vmap_pud_range(p4d, addr, next, phys_addr, prot, mask))
+               if (vmap_pud_range(p4d, addr, next, phys_addr, prot,
+                                       max_page_shift, mask))
                        return -ENOMEM;
        } while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
        return 0;
 }
 
 static int vmap_range(unsigned long addr, unsigned long end,
-                       phys_addr_t phys_addr, pgprot_t prot)
+                       phys_addr_t phys_addr, pgprot_t prot,
+                       unsigned int max_page_shift)
 {
        pgd_t *pgd;
        unsigned long start;
@@ -232,7 +217,8 @@ static int vmap_range(unsigned long addr, unsigned long end,
        pgd = pgd_offset_k(addr);
        do {
                next = pgd_addr_end(addr, end);
-               err = vmap_p4d_range(pgd, addr, next, phys_addr, prot, &mask);
+               err = vmap_p4d_range(pgd, addr, next, phys_addr, prot,
+                                       max_page_shift, &mask);
                if (err)
                        break;
        } while (pgd++, phys_addr += (next - addr), addr = next, addr != end);
@@ -248,7 +234,7 @@ static int vmap_range(unsigned long addr, unsigned long end,
 int ioremap_page_range(unsigned long addr,
                       unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
 {
-       return vmap_range(addr, end, phys_addr, prot);
+       return vmap_range(addr, end, phys_addr, prot, iomap_max_page_shift);
 }
 
 #ifdef CONFIG_GENERIC_IOREMAP