From: Philip Oberfichtner
Date: Fri, 2 Aug 2024 09:25:35 +0000 (+0200)
Subject: x86: provide mb() macro

x86: provide mb() macro

Implement an x86 memory barrier mb(). Furthermore, remove the previously
used mfence() function, which does the same thing.

The mb() macro is now equivalent to Linux (v6.9):
linux/arch/x86/include/asm/barrier.h

Signed-off-by: Philip Oberfichtner
Reviewed-by: Simon Glass
---

diff --git a/arch/x86/cpu/mp_init.c b/arch/x86/cpu/mp_init.c
index aa1f47d722..e2e1849c06 100644
--- a/arch/x86/cpu/mp_init.c
+++ b/arch/x86/cpu/mp_init.c
@@ -164,12 +164,12 @@ static inline void barrier_wait(atomic_t *b)
 {
 	while (atomic_read(b) == 0)
 		asm("pause");
-	mfence();
+	mb();
 }
 
 static inline void release_barrier(atomic_t *b)
 {
-	mfence();
+	mb();
 	atomic_set(b, 1);
 }
 
@@ -631,7 +631,7 @@ static int run_ap_work(struct mp_callback *callback, struct udevice *bsp,
 		if (cur_cpu != i)
 			store_callback(&ap_callbacks[i], callback);
 	}
-	mfence();
+	mb();
 
 	/* Wait for all the APs to signal back that call has been accepted. */
 	start = get_timer(0);
@@ -656,7 +656,7 @@ static int run_ap_work(struct mp_callback *callback, struct udevice *bsp,
 	} while (cpus_accepted != num_aps);
 
 	/* Make sure we can see any data written by the APs */
-	mfence();
+	mb();
 
 	return 0;
 }
@@ -692,7 +692,7 @@ static int ap_wait_for_instruction(struct udevice *cpu, void *unused)
 
 		/* Copy to local variable before using the value */
 		memcpy(&lcb, cb, sizeof(lcb));
-		mfence();
+		mb();
 		if (lcb.logical_cpu_number == MP_SELECT_ALL ||
 		    lcb.logical_cpu_number == MP_SELECT_APS ||
 		    dev_seq(cpu) == lcb.logical_cpu_number)
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
index 073f80b07f..87e0c6f12b 100644
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -185,11 +185,6 @@ static inline int flag_is_changeable_p(uint32_t flag)
 }
 #endif
 
-static inline void mfence(void)
-{
-	__asm__ __volatile__("mfence" : : : "memory");
-}
-
 /**
  * cpu_enable_paging_pae() - Enable PAE-paging
  *
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index c6d90eb794..1390193f09 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -240,6 +240,7 @@ static inline void sync(void)
  * have some advantages to use them instead of the simple one here.
  */
 #define dmb()		__asm__ __volatile__ ("" : : : "memory")
+#define mb()		__asm__ __volatile__ ("mfence" : : : "memory")
 #define __iormb()	dmb()
 #define __iowmb()	dmb()
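
For context, below is a minimal sketch (not part of the patch) of the
producer/consumer handshake that these barriers protect in mp_init.c.
Only mb(), atomic_set(), atomic_read() and asm("pause") come from the
code above; the shared_data variable and the function names are
hypothetical:

/*
 * Illustrative sketch only. The full barrier makes the payload write
 * globally visible before the flag is raised, and keeps the consumer
 * from reading the payload before it has observed the flag.
 */
static struct mp_callback *shared_data;	/* hypothetical shared slot */
static atomic_t flag;

static void producer(struct mp_callback *cb)
{
	shared_data = cb;	/* 1. write the payload */
	mb();			/* 2. mfence: payload visible to other CPUs... */
	atomic_set(&flag, 1);	/* 3. ...before the flag is raised */
}

static void consumer(struct mp_callback *out)
{
	while (atomic_read(&flag) == 0)
		asm("pause");	/* spin until the producer raises the flag */
	mb();			/* don't read the payload before seeing the flag */
	*out = *shared_data;
}

Note the difference between the two macros in io.h: dmb() is an empty
asm statement with a "memory" clobber, so it only stops the compiler
from reordering, while mb() additionally emits an mfence instruction,
a full hardware barrier that orders loads and stores across CPUs.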