}
}
+void mmu_map_region(phys_addr_t addr, u64 size, bool emergency)
+{
+ u64 va_bits;
+ int level = 0;
+	u64 attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_INNER_SHARE |
+		    PTE_TYPE_BLOCK | PTE_BLOCK_AF;
+
+ get_tcr(NULL, &va_bits);
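+	/* Page tables start at level 1 when the VA space is narrower than 39 bits */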
+ if (va_bits < 39)
+ level = 1;
+
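+	/* Optionally make the region permanently mapped in the emergency table too */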
+ if (emergency)
+ map_range(addr, addr, size, level,
+ (u64 *)gd->arch.tlb_emerg, attrs);
+
+ /* Switch pagetables while we update the primary one */
+ __asm_switch_ttbr(gd->arch.tlb_emerg);
+
+ map_range(addr, addr, size, level,
+ (u64 *)gd->arch.tlb_addr, attrs);
+
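+	/* Resume using the primary pagetables, which now map the new region */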
+ __asm_switch_ttbr(gd->arch.tlb_addr);
+}
+
static void add_map(struct mm_region *map)
{
u64 attrs = map->attrs | PTE_TYPE_BLOCK | PTE_BLOCK_AF;
void smp_kick_all_cpus(void);
void flush_l3_cache(void);
+
+/**
+ * mmu_map_region() - Map a region of previously unmapped memory.
+ *
+ * The region is identity mapped (VA == PA) as normal memory (MT_NORMAL)
+ * with the PTE_BLOCK_INNER_SHARE attribute.
+ *
+ * @addr: Start address of the region
+ * @size: Size of the region
+ * @emergency: Also map the region in the emergency table
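+ *
+ * Example (hypothetical base address, for illustration only): identity
+ * map 2 MiB of late-discovered memory into both the primary and the
+ * emergency tables:
+ *
+ *	mmu_map_region(0x880000000ULL, SZ_2M, true);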
+ */
+void mmu_map_region(phys_addr_t addr, u64 size, bool emergency);
void mmu_change_region_attr(phys_addr_t start, size_t size, u64 attrs);
/*