msr csselr_el1, x12 /* select cache level */
isb /* sync change of csselr_el1 */
mrs x6, ccsidr_el1 /* read the new ccsidr_el1 */
- and x2, x6, #7 /* x2 <- log2(cache line size)-4 */
+ ubfx x2, x6, #0, #3 /* x2 <- log2(cache line size)-4 */
+ ubfx x3, x6, #3, #10 /* x3 <- number of cache ways - 1 */
+ ubfx x4, x6, #13, #15 /* x4 <- number of cache sets - 1 */
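+ /*
+  * ccsidr_el1 layout (32-bit format; assumes FEAT_CCIDX absent):
+  *   [2:0]   LineSize: log2(line size in bytes) - 4
+  *   [12:3]  Associativity: number of ways - 1
+  *   [27:13] NumSets: number of sets - 1
+  */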
add x2, x2, #4 /* x2 <- log2(cache line size) */
- mov x3, #0x3ff
- and x3, x3, x6, lsr #3 /* x3 <- max number of #ways */
clz w5, w3 /* bit position of #ways */
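/* i.e. x5 <- 32 - ceil(log2(#ways)), the shift that places the way index in the dc cisw operand */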
- mov x4, #0x7fff
- and x4, x4, x6, lsr #13 /* x4 <- max number of #sets */
/* x12 <- cache level << 1 */
/* x2 <- line length offset */
/* x3 <- number of cache ways - 1 */
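/* x4 <- number of cache sets - 1 */
/* x5 <- bit position of #ways */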
mov x1, x0 /* x1 <- flush/invalidate flag */
dsb sy
mrs x10, clidr_el1 /* read clidr_el1 */
- lsr x11, x10, #24
- and x11, x11, #0x7 /* x11 <- loc */
+ ubfx x11, x10, #24, #3 /* x11 <- loc */
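+ /* clidr_el1[26:24] = LoC: levels 0..LoC-1 need clean/invalidate to reach the point of coherence */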
cbz x11, finished /* if loc is 0, exit */
mov x15, lr /* preserve lr */
mov x0, #0 /* start flush at cache level 0 */
.pushsection .text.__asm_flush_dcache_range, "ax"
ENTRY(__asm_flush_dcache_range)
mrs x3, ctr_el0
- lsr x3, x3, #16
- and x3, x3, #0xf
+ ubfx x3, x3, #16, #4
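+ /* ctr_el0[19:16] = DminLine: log2(smallest d-cache line in 4-byte words), so line size = 4 << DminLine bytes */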
mov x2, #4
lsl x2, x2, x3 /* cache line size */
.pushsection .text.__asm_invalidate_dcache_range, "ax"
ENTRY(__asm_invalidate_dcache_range)
mrs x3, ctr_el0
- ubfm x3, x3, #16, #19
+ ubfx x3, x3, #16, #4
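+ /* same DminLine extract; ubfx xd, xn, #lsb, #width is the preferred alias of ubfm xd, xn, #lsb, #(lsb + width - 1) */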
mov x2, #4
lsl x2, x2, x3 /* cache line size */