From: Christophe Leroy
Date: Tue, 11 Jul 2023 15:59:20 +0000 (+0200)
Subject: powerpc/kuap: KUAP enabling/disabling functions must be __always_inline
X-Git-Tag: v6.6-pxa1908~412^2~125
X-Git-Url: https://git.dujemihanovic.xyz/?a=commitdiff_plain;h=eb52f66f0abd468caf8be4e690d7fdef96250c2f;p=linux.git

powerpc/kuap: KUAP enabling/disabling functions must be __always_inline

Objtool reports the following warnings:

arch/powerpc/kernel/signal_32.o: warning: objtool: __prevent_user_access.constprop.0+0x4 (.text+0x4): redundant UACCESS disable
arch/powerpc/kernel/signal_32.o: warning: objtool: user_access_begin+0x2c (.text+0x4c): return with UACCESS enabled
arch/powerpc/kernel/signal_32.o: warning: objtool: handle_rt_signal32+0x188 (.text+0x360): call to __prevent_user_access.constprop.0() with UACCESS enabled
arch/powerpc/kernel/signal_32.o: warning: objtool: handle_signal32+0x150 (.text+0x4d4): call to __prevent_user_access.constprop.0() with UACCESS enabled

This is due to some KUAP enabling/disabling functions being emitted
out of line although they are marked inline. Use __always_inline instead.

Signed-off-by: Christophe Leroy
Signed-off-by: Michael Ellerman
Link: https://msgid.link/ca5e50ddbec3867db5146ebddbc9a1dc0e443bc8.1689091022.git.christophe.leroy@csgroup.eu
---

diff --git a/arch/powerpc/include/asm/book3s/32/kup.h b/arch/powerpc/include/asm/book3s/32/kup.h
index 452d4efa84f5..931d200afe56 100644
--- a/arch/powerpc/include/asm/book3s/32/kup.h
+++ b/arch/powerpc/include/asm/book3s/32/kup.h
@@ -15,19 +15,19 @@

 #define KUAP_NONE	(~0UL)

-static inline void kuap_lock_one(unsigned long addr)
+static __always_inline void kuap_lock_one(unsigned long addr)
 {
 	mtsr(mfsr(addr) | SR_KS, addr);
 	isync();	/* Context sync required after mtsr() */
 }

-static inline void kuap_unlock_one(unsigned long addr)
+static __always_inline void kuap_unlock_one(unsigned long addr)
 {
 	mtsr(mfsr(addr) & ~SR_KS, addr);
 	isync();	/* Context sync required after mtsr() */
 }

-static inline void __kuap_save_and_lock(struct pt_regs *regs)
+static __always_inline void __kuap_save_and_lock(struct pt_regs *regs)
 {
 	unsigned long kuap = current->thread.kuap;

@@ -40,11 +40,11 @@ static inline void __kuap_save_and_lock(struct pt_regs *regs)
 }
 #define __kuap_save_and_lock __kuap_save_and_lock

-static inline void kuap_user_restore(struct pt_regs *regs)
+static __always_inline void kuap_user_restore(struct pt_regs *regs)
 {
 }

-static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
+static __always_inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
 {
 	if (unlikely(kuap != KUAP_NONE)) {
 		current->thread.kuap = KUAP_NONE;
@@ -59,7 +59,7 @@ static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long kua
 	kuap_unlock_one(regs->kuap);
 }

-static inline unsigned long __kuap_get_and_assert_locked(void)
+static __always_inline unsigned long __kuap_get_and_assert_locked(void)
 {
 	unsigned long kuap = current->thread.kuap;

@@ -94,7 +94,7 @@ static __always_inline void __prevent_user_access(unsigned long dir)
 		kuap_lock_one(kuap);
 }

-static inline unsigned long __prevent_user_access_return(void)
+static __always_inline unsigned long __prevent_user_access_return(void)
 {
 	unsigned long flags = current->thread.kuap;

@@ -106,7 +106,7 @@ static inline unsigned long __prevent_user_access_return(void)
 	return flags;
 }

-static inline void __restore_user_access(unsigned long flags)
+static __always_inline void __restore_user_access(unsigned long flags)
 {
 	if (flags != KUAP_NONE) {
 		current->thread.kuap = flags;
@@ -114,7 +114,7 @@ static inline void __restore_user_access(unsigned long flags)
 	}
 }

-static inline bool
+static __always_inline bool
 __bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
 {
 	unsigned long kuap = regs->kuap;
diff --git a/arch/powerpc/include/asm/book3s/64/kup.h b/arch/powerpc/include/asm/book3s/64/kup.h
index a014f4d9a2aa..497a7bd31ecc 100644
--- a/arch/powerpc/include/asm/book3s/64/kup.h
+++ b/arch/powerpc/include/asm/book3s/64/kup.h
@@ -213,14 +213,14 @@ extern u64 __ro_after_init default_iamr;
  * access restrictions. Because of this ignore AMR value when accessing
  * userspace via kernel thread.
  */
-static inline u64 current_thread_amr(void)
+static __always_inline u64 current_thread_amr(void)
 {
 	if (current->thread.regs)
 		return current->thread.regs->amr;
 	return default_amr;
 }

-static inline u64 current_thread_iamr(void)
+static __always_inline u64 current_thread_iamr(void)
 {
 	if (current->thread.regs)
 		return current->thread.regs->iamr;
@@ -230,7 +230,7 @@ static inline u64 current_thread_iamr(void)

 #ifdef CONFIG_PPC_KUAP

-static inline void kuap_user_restore(struct pt_regs *regs)
+static __always_inline void kuap_user_restore(struct pt_regs *regs)
 {
 	bool restore_amr = false, restore_iamr = false;
 	unsigned long amr, iamr;
@@ -269,7 +269,7 @@ static inline void kuap_user_restore(struct pt_regs *regs)
 	 */
 }

-static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long amr)
+static __always_inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long amr)
 {
 	if (likely(regs->amr == amr))
 		return;
@@ -285,7 +285,7 @@ static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long amr
 	 */
 }

-static inline unsigned long __kuap_get_and_assert_locked(void)
+static __always_inline unsigned long __kuap_get_and_assert_locked(void)
 {
 	unsigned long amr = mfspr(SPRN_AMR);

@@ -302,7 +302,7 @@ static inline unsigned long __kuap_get_and_assert_locked(void)
  * because that would require an expensive read/modify write of the AMR.
  */

-static inline unsigned long get_kuap(void)
+static __always_inline unsigned long get_kuap(void)
 {
 	/*
 	 * We return AMR_KUAP_BLOCKED when we don't support KUAP because
@@ -332,7 +332,8 @@ static __always_inline void set_kuap(unsigned long value)
 	isync();
 }

-static inline bool __bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
+static __always_inline bool
+__bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
 {
 	/*
 	 * For radix this will be a storage protection fault (DSISR_PROTFAULT).
@@ -375,12 +376,12 @@ static __always_inline void allow_user_access(void __user *to, const void __user

 #else /* CONFIG_PPC_KUAP */

-static inline unsigned long get_kuap(void)
+static __always_inline unsigned long get_kuap(void)
 {
 	return AMR_KUAP_BLOCKED;
 }

-static inline void set_kuap(unsigned long value) { }
+static __always_inline void set_kuap(unsigned long value) { }

 static __always_inline void allow_user_access(void __user *to, const void __user *from,
 					      unsigned long size, unsigned long dir)
@@ -395,7 +396,7 @@ static __always_inline void prevent_user_access(unsigned long dir)
 		do_uaccess_flush();
 }

-static inline unsigned long prevent_user_access_return(void)
+static __always_inline unsigned long prevent_user_access_return(void)
 {
 	unsigned long flags = get_kuap();

@@ -406,7 +407,7 @@ static inline unsigned long prevent_user_access_return(void)
 	return flags;
 }

-static inline void restore_user_access(unsigned long flags)
+static __always_inline void restore_user_access(unsigned long flags)
 {
 	set_kuap(flags);
 	if (static_branch_unlikely(&uaccess_flush_key) && flags == AMR_KUAP_BLOCKED)
diff --git a/arch/powerpc/include/asm/kup.h b/arch/powerpc/include/asm/kup.h
index bab161b609c1..77adb9cd2da5 100644
--- a/arch/powerpc/include/asm/kup.h
+++ b/arch/powerpc/include/asm/kup.h
@@ -57,14 +57,14 @@ static inline void setup_kuap(bool disabled) { }

 static __always_inline bool kuap_is_disabled(void) { return true; }

-static inline bool
+static __always_inline bool
 __bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
 {
 	return false;
 }

-static inline void kuap_user_restore(struct pt_regs *regs) { }
-static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long amr) { }
+static __always_inline void kuap_user_restore(struct pt_regs *regs) { }
+static __always_inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long amr) { }

 /*
  * book3s/64/kup-radix.h defines these functions for the !KUAP case to flush
@@ -72,11 +72,11 @@ static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long amr
  * platforms.
  */
 #ifndef CONFIG_PPC_BOOK3S_64
-static inline void __allow_user_access(void __user *to, const void __user *from,
-				       unsigned long size, unsigned long dir) { }
-static inline void __prevent_user_access(unsigned long dir) { }
-static inline unsigned long __prevent_user_access_return(void) { return 0UL; }
-static inline void __restore_user_access(unsigned long flags) { }
+static __always_inline void __allow_user_access(void __user *to, const void __user *from,
+						unsigned long size, unsigned long dir) { }
+static __always_inline void __prevent_user_access(unsigned long dir) { }
+static __always_inline unsigned long __prevent_user_access_return(void) { return 0UL; }
+static __always_inline void __restore_user_access(unsigned long flags) { }
 #endif /* CONFIG_PPC_BOOK3S_64 */
 #endif /* CONFIG_PPC_KUAP */

diff --git a/arch/powerpc/include/asm/nohash/32/kup-8xx.h b/arch/powerpc/include/asm/nohash/32/kup-8xx.h
index d0601859c45a..e231b3afed98 100644
--- a/arch/powerpc/include/asm/nohash/32/kup-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/kup-8xx.h
@@ -11,24 +11,24 @@

 #include

-static inline void __kuap_save_and_lock(struct pt_regs *regs)
+static __always_inline void __kuap_save_and_lock(struct pt_regs *regs)
 {
 	regs->kuap = mfspr(SPRN_MD_AP);
 	mtspr(SPRN_MD_AP, MD_APG_KUAP);
 }
 #define __kuap_save_and_lock __kuap_save_and_lock

-static inline void kuap_user_restore(struct pt_regs *regs)
+static __always_inline void kuap_user_restore(struct pt_regs *regs)
 {
 }

-static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
+static __always_inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
 {
 	mtspr(SPRN_MD_AP, regs->kuap);
 }

 #ifdef CONFIG_PPC_KUAP_DEBUG
-static inline unsigned long __kuap_get_and_assert_locked(void)
+static __always_inline unsigned long __kuap_get_and_assert_locked(void)
 {
 	WARN_ON_ONCE(mfspr(SPRN_MD_AP) >> 16 != MD_APG_KUAP >> 16);

@@ -37,18 +37,18 @@ static inline unsigned long __kuap_get_and_assert_locked(void)
 #define __kuap_get_and_assert_locked __kuap_get_and_assert_locked
 #endif

-static inline void __allow_user_access(void __user *to, const void __user *from,
-				       unsigned long size, unsigned long dir)
+static __always_inline void __allow_user_access(void __user *to, const void __user *from,
+						unsigned long size, unsigned long dir)
 {
 	mtspr(SPRN_MD_AP, MD_APG_INIT);
 }

-static inline void __prevent_user_access(unsigned long dir)
+static __always_inline void __prevent_user_access(unsigned long dir)
 {
 	mtspr(SPRN_MD_AP, MD_APG_KUAP);
 }

-static inline unsigned long __prevent_user_access_return(void)
+static __always_inline unsigned long __prevent_user_access_return(void)
 {
 	unsigned long flags;

@@ -59,12 +59,12 @@ static inline unsigned long __prevent_user_access_return(void)
 	return flags;
 }

-static inline void __restore_user_access(unsigned long flags)
+static __always_inline void __restore_user_access(unsigned long flags)
 {
 	mtspr(SPRN_MD_AP, flags);
 }

-static inline bool
+static __always_inline bool
 __bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
 {
 	return !((regs->kuap ^ MD_APG_KUAP) & 0xff000000);
diff --git a/arch/powerpc/include/asm/nohash/kup-booke.h b/arch/powerpc/include/asm/nohash/kup-booke.h
index 8e4734c8fef1..98780a2d3dcd 100644
--- a/arch/powerpc/include/asm/nohash/kup-booke.h
+++ b/arch/powerpc/include/asm/nohash/kup-booke.h
@@ -17,14 +17,14 @@

 #include

-static inline void __kuap_lock(void)
+static __always_inline void __kuap_lock(void)
 {
 	mtspr(SPRN_PID, 0);
 	isync();
 }
 #define __kuap_lock __kuap_lock
-static inline void __kuap_save_and_lock(struct pt_regs *regs)
+static __always_inline void __kuap_save_and_lock(struct pt_regs *regs)
 {
 	regs->kuap = mfspr(SPRN_PID);
 	mtspr(SPRN_PID, 0);
@@ -32,7 +32,7 @@ static inline void __kuap_save_and_lock(struct pt_regs *regs)
 }
 #define __kuap_save_and_lock __kuap_save_and_lock

-static inline void kuap_user_restore(struct pt_regs *regs)
+static __always_inline void kuap_user_restore(struct pt_regs *regs)
 {
 	if (kuap_is_disabled())
 		return;
@@ -42,7 +42,7 @@ static inline void kuap_user_restore(struct pt_regs *regs)
 	/* Context synchronisation is performed by rfi */
 }

-static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
+static __always_inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
 {
 	if (regs->kuap)
 		mtspr(SPRN_PID, current->thread.pid);
@@ -51,7 +51,7 @@ static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long kua
 }

 #ifdef CONFIG_PPC_KUAP_DEBUG
-static inline unsigned long __kuap_get_and_assert_locked(void)
+static __always_inline unsigned long __kuap_get_and_assert_locked(void)
 {
 	WARN_ON_ONCE(mfspr(SPRN_PID));

@@ -60,20 +60,20 @@ static inline unsigned long __kuap_get_and_assert_locked(void)
 #define __kuap_get_and_assert_locked __kuap_get_and_assert_locked
 #endif

-static inline void __allow_user_access(void __user *to, const void __user *from,
-				       unsigned long size, unsigned long dir)
+static __always_inline void __allow_user_access(void __user *to, const void __user *from,
+						unsigned long size, unsigned long dir)
 {
 	mtspr(SPRN_PID, current->thread.pid);
 	isync();
 }

-static inline void __prevent_user_access(unsigned long dir)
+static __always_inline void __prevent_user_access(unsigned long dir)
 {
 	mtspr(SPRN_PID, 0);
 	isync();
 }

-static inline unsigned long __prevent_user_access_return(void)
+static __always_inline unsigned long __prevent_user_access_return(void)
 {
 	unsigned long flags = mfspr(SPRN_PID);

@@ -83,7 +83,7 @@ static inline unsigned long __prevent_user_access_return(void)
 	return flags;
 }

-static inline void __restore_user_access(unsigned long flags)
+static __always_inline void __restore_user_access(unsigned long flags)
 {
 	if (flags) {
 		mtspr(SPRN_PID, current->thread.pid);
@@ -91,7 +91,7 @@ static inline void __restore_user_access(unsigned long flags)
 	}
 }

-static inline bool
+static __always_inline bool
 __bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
 {
 	return !regs->kuap;
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index a2d255aa9627..fb725ec77926 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -386,7 +386,7 @@ copy_mc_to_user(void __user *to, const void *from, unsigned long n)

 extern long __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size);

-static __must_check inline bool user_access_begin(const void __user *ptr, size_t len)
+static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
 {
 	if (unlikely(!access_ok(ptr, len)))
 		return false;
@@ -401,7 +401,7 @@ static __must_check inline bool user_access_begin(const void __user *ptr, size_t
 #define user_access_save	prevent_user_access_return
 #define user_access_restore	restore_user_access

-static __must_check inline bool
+static __must_check __always_inline bool
 user_read_access_begin(const void __user *ptr, size_t len)
 {
 	if (unlikely(!access_ok(ptr, len)))
@@ -415,7 +415,7 @@ user_read_access_begin(const void __user *ptr, size_t len)
 #define user_read_access_begin	user_read_access_begin
 #define user_read_access_end	prevent_current_read_from_user

-static __must_check inline bool
+static __must_check __always_inline bool
 user_write_access_begin(const void __user *ptr, size_t len)
 {
 	if (unlikely(!access_ok(ptr, len)))
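
Background note (not part of the patch): "inline" is only a hint, so the compiler remains free to keep an out-of-line copy of these helpers, for instance a constant-propagated clone such as the __prevent_user_access.constprop.0 seen in the warnings above. The KUAP enable/disable then crosses a real call/return boundary, which is exactly what objtool's UACCESS tracking flags. __always_inline removes that freedom. Below is a minimal stand-alone sketch of the difference, with made-up helper names (only the __always_inline definition mirrors include/linux/compiler_types.h); building it with "gcc -O0 -c toy.c && nm toy.o" should show the hinted helper emitted as a local symbol and called, while the forced one is folded into its caller.

/* toy.c - inline is a hint, __always_inline is a guarantee (illustration only) */
#define __always_inline inline __attribute__((__always_inline__))

static unsigned long uaccess_enabled;	/* stand-in for the real KUAP state */

/* Hinted: GCC may still emit an out-of-line copy (e.g. at -O0 or as a clone). */
static inline void hinted_prevent_user_access(void)
{
	uaccess_enabled = 0;
}

/* Forced: always expanded into the caller, never a separate function. */
static __always_inline void forced_prevent_user_access(void)
{
	uaccess_enabled = 0;
}

unsigned long demo(void)
{
	hinted_prevent_user_access();
	forced_prevent_user_access();
	return uaccess_enabled;
}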