// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2002
 * Wolfgang Denk, DENX Software Engineering, wd@denx.de.
 */

#include <common.h>
#include <cpu_func.h>
#include <log.h>
#include <asm/global_data.h>
#include <asm/system.h>
#include <asm/cache.h>
#include <linux/compiler.h>
#include <asm/armv7_mpu.h>

#if !(CONFIG_IS_ENABLED(SYS_ICACHE_OFF) && CONFIG_IS_ENABLED(SYS_DCACHE_OFF))

DECLARE_GLOBAL_DATA_PTR;

#ifdef CONFIG_SYS_ARM_MMU
__weak void arm_init_before_mmu(void)
{
}

static void set_section_phys(int section, phys_addr_t phys,
			     enum dcache_option option)
{
#ifdef CONFIG_ARMV7_LPAE
	u64 *page_table = (u64 *)gd->arch.tlb_addr;
	/* Need to set the access flag to not fault */
	u64 value = TTB_SECT_AP | TTB_SECT_AF;
#else
	u32 *page_table = (u32 *)gd->arch.tlb_addr;
	u32 value = TTB_SECT_AP;
#endif

	/* Add the page offset */
	value |= phys;

	/* Add caching bits */
	value |= option;

	/* Set PTE */
	page_table[section] = value;
}

void set_section_dcache(int section, enum dcache_option option)
{
	set_section_phys(section, (u32)section << MMU_SECTION_SHIFT, option);
}

__weak void mmu_page_table_flush(unsigned long start, unsigned long stop)
{
	debug("%s: Warning: not implemented\n", __func__);
}

void mmu_set_region_dcache_behaviour_phys(phys_addr_t start, phys_addr_t phys,
					  size_t size, enum dcache_option option)
{
#ifdef CONFIG_ARMV7_LPAE
	u64 *page_table = (u64 *)gd->arch.tlb_addr;
#else
	u32 *page_table = (u32 *)gd->arch.tlb_addr;
#endif
	unsigned long startpt, stoppt;
	unsigned long upto, end;

	/* div by 2 before start + size to avoid phys_addr_t overflow */
	end = ALIGN((start / 2) + (size / 2), MMU_SECTION_SIZE / 2)
	      >> (MMU_SECTION_SHIFT - 1);
	start = start >> MMU_SECTION_SHIFT;

#ifdef CONFIG_ARMV7_LPAE
	debug("%s: start=%pa, size=%zu, option=%llx\n", __func__, &start, size,
	      option);
#else
	debug("%s: start=%pa, size=%zu, option=0x%x\n", __func__, &start, size,
	      option);
#endif
	for (upto = start; upto < end; upto++, phys += MMU_SECTION_SIZE)
		set_section_phys(upto, phys, option);

	/*
	 * Make sure range is cache line aligned
	 * Only CPU maintains page tables, hence it is safe to always
	 * flush complete cache lines...
	 */

	startpt = (unsigned long)&page_table[start];
	startpt &= ~(CONFIG_SYS_CACHELINE_SIZE - 1);
	stoppt = (unsigned long)&page_table[end];
	stoppt = ALIGN(stoppt, CONFIG_SYS_CACHELINE_SIZE);
	mmu_page_table_flush(startpt, stoppt);
}
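/*
 * Example (sketch): board or driver code normally calls the virtual-address
 * wrapper mmu_set_region_dcache_behaviour() (defined further down) rather
 * than the _phys variant, e.g. to make a buffer that is shared with a DMA
 * master uncached. The base address and size below are hypothetical; the
 * region should be aligned to MMU_SECTION_SIZE (1 MiB, or 2 MiB with LPAE):
 *
 *	mmu_set_region_dcache_behaviour(0x80000000, 16 << 20, DCACHE_OFF);
 */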
__weak void dram_bank_mmu_setup(int bank)
{
	struct bd_info *bd = gd->bd;
	int i;

	/* bd->bi_dram is available only after relocation */
	if ((gd->flags & GD_FLG_RELOC) == 0)
		return;

	debug("%s: bank: %d\n", __func__, bank);
	for (i = bd->bi_dram[bank].start >> MMU_SECTION_SHIFT;
	     i < (bd->bi_dram[bank].start >> MMU_SECTION_SHIFT) +
		 (bd->bi_dram[bank].size >> MMU_SECTION_SHIFT);
	     i++)
		set_section_dcache(i, DCACHE_DEFAULT_OPTION);
}

/* to activate the MMU we need to set up virtual memory: use 1M areas */
static inline void mmu_setup(void)
{
	int i;
	u32 reg;

	arm_init_before_mmu();
	/* Set up an identity-mapping for all 4GB, rw for everyone */
	for (i = 0; i < ((4096ULL * 1024 * 1024) >> MMU_SECTION_SHIFT); i++)
		set_section_dcache(i, DCACHE_OFF);

	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		dram_bank_mmu_setup(i);
	}

#if defined(CONFIG_ARMV7_LPAE) && __LINUX_ARM_ARCH__ != 4
	/* Set up 4 PTE entries pointing to our 4 1GB page tables */
	for (i = 0; i < 4; i++) {
		u64 *page_table = (u64 *)(gd->arch.tlb_addr + (4096 * 4));
		u64 tpt = gd->arch.tlb_addr + (4096 * i);

		page_table[i] = tpt | TTB_PAGETABLE;
	}

	reg = TTBCR_EAE;
#if defined(CONFIG_SYS_ARM_CACHE_WRITETHROUGH)
	reg |= TTBCR_ORGN0_WT | TTBCR_IRGN0_WT;
#elif defined(CONFIG_SYS_ARM_CACHE_WRITEALLOC)
	reg |= TTBCR_ORGN0_WBWA | TTBCR_IRGN0_WBWA;
#else
	reg |= TTBCR_ORGN0_WBNWA | TTBCR_IRGN0_WBNWA;
#endif

	if (is_hyp()) {
		/* Set HTCR to enable LPAE */
		asm volatile("mcr p15, 4, %0, c2, c0, 2"
			: : "r" (reg) : "memory");
		/* Set HTTBR0 */
		asm volatile("mcrr p15, 4, %0, %1, c2"
			:
			: "r"(gd->arch.tlb_addr + (4096 * 4)), "r"(0)
			: "memory");
		/* Set HMAIR */
		asm volatile("mcr p15, 4, %0, c10, c2, 0"
			: : "r" (MEMORY_ATTRIBUTES) : "memory");
	} else {
		/* Set TTBCR to enable LPAE */
		asm volatile("mcr p15, 0, %0, c2, c0, 2"
			: : "r" (reg) : "memory");
		/* Set 64-bit TTBR0 */
		asm volatile("mcrr p15, 0, %0, %1, c2"
			:
			: "r"(gd->arch.tlb_addr + (4096 * 4)), "r"(0)
			: "memory");
		/* Set MAIR */
		asm volatile("mcr p15, 0, %0, c10, c2, 0"
			: : "r" (MEMORY_ATTRIBUTES) : "memory");
	}
#elif defined(CONFIG_CPU_V7A)
	if (is_hyp()) {
		/* Set HTCR to disable LPAE */
		asm volatile("mcr p15, 4, %0, c2, c0, 2"
			: : "r" (0) : "memory");
	} else {
		/* Set TTBCR to disable LPAE */
		asm volatile("mcr p15, 0, %0, c2, c0, 2"
			: : "r" (0) : "memory");
	}
	/* Set TTBR0 */
	reg = gd->arch.tlb_addr & TTBR0_BASE_ADDR_MASK;
#if defined(CONFIG_SYS_ARM_CACHE_WRITETHROUGH)
	reg |= TTBR0_RGN_WT | TTBR0_IRGN_WT;
#elif defined(CONFIG_SYS_ARM_CACHE_WRITEALLOC)
	reg |= TTBR0_RGN_WBWA | TTBR0_IRGN_WBWA;
#else
	reg |= TTBR0_RGN_WB | TTBR0_IRGN_WB;
#endif
	asm volatile("mcr p15, 0, %0, c2, c0, 0"
		     : : "r" (reg) : "memory");
#else
	/* Copy the page table address to cp15 */
	asm volatile("mcr p15, 0, %0, c2, c0, 0"
		     : : "r" (gd->arch.tlb_addr) : "memory");
#endif
	/*
	 * initial value of Domain Access Control Register (DACR)
	 * Set the access control to client (1U) for each of the 16 domains
	 */
	asm volatile("mcr p15, 0, %0, c3, c0, 0"
		     : : "r" (0x55555555));

	/* and enable the mmu */
	reg = get_cr();	/* get control reg. */
	set_cr(reg | CR_M);
}

static int mmu_enabled(void)
{
	return get_cr() & CR_M;
}
#endif /* CONFIG_SYS_ARM_MMU */
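/*
 * Example (sketch): the cache_enable()/cache_disable() helpers below are not
 * normally called directly by board code. A board or SoC typically overrides
 * the weak enable_caches() hook (declared in cpu_func.h) and turns both
 * caches on from there, which on MMU systems runs mmu_setup() via
 * cache_enable(CR_C):
 *
 *	void enable_caches(void)
 *	{
 *		icache_enable();
 *		dcache_enable();
 *	}
 */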
/* cache_bit must be either CR_I or CR_C */
static void cache_enable(uint32_t cache_bit)
{
	uint32_t reg;

	/* The data cache is not active unless the mmu/mpu is enabled too */
#ifdef CONFIG_SYS_ARM_MMU
	if ((cache_bit == CR_C) && !mmu_enabled())
		mmu_setup();
#elif defined(CONFIG_SYS_ARM_MPU)
	if ((cache_bit == CR_C) && !mpu_enabled()) {
		printf("Consider enabling MPU before enabling caches\n");
		return;
	}
#endif
	reg = get_cr();	/* get control reg. */
	set_cr(reg | cache_bit);
}

/* cache_bit must be either CR_I or CR_C */
static void cache_disable(uint32_t cache_bit)
{
	uint32_t reg;

	reg = get_cr();

	if (cache_bit == CR_C) {
		/* if cache isn't enabled no need to disable */
		if ((reg & CR_C) != CR_C)
			return;
#ifdef CONFIG_SYS_ARM_MMU
		/* if disabling data cache, disable mmu too */
		cache_bit |= CR_M;
#endif
	}
	reg = get_cr();

#ifdef CONFIG_SYS_ARM_MMU
	if (cache_bit == (CR_C | CR_M))
#elif defined(CONFIG_SYS_ARM_MPU)
	if (cache_bit == CR_C)
#endif
		flush_dcache_all();
	set_cr(reg & ~cache_bit);
}
#endif

#if CONFIG_IS_ENABLED(SYS_ICACHE_OFF)
void icache_enable(void)
{
	return;
}

void icache_disable(void)
{
	return;
}

int icache_status(void)
{
	return 0;		/* always off */
}
#else
void icache_enable(void)
{
	cache_enable(CR_I);
}

void icache_disable(void)
{
	cache_disable(CR_I);
}

int icache_status(void)
{
	return (get_cr() & CR_I) != 0;
}
#endif

#if CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
void dcache_enable(void)
{
	return;
}

void dcache_disable(void)
{
	return;
}

int dcache_status(void)
{
	return 0;		/* always off */
}

void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
}

#else
void dcache_enable(void)
{
	cache_enable(CR_C);
}

void dcache_disable(void)
{
	cache_disable(CR_C);
}

int dcache_status(void)
{
	return (get_cr() & CR_C) != 0;
}

void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
	mmu_set_region_dcache_behaviour_phys(start, start, size, option);
}
#endif
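/*
 * Example (sketch): drivers usually guard explicit cache maintenance with the
 * status helpers above before handing a buffer to a DMA engine. The buffer
 * and length below are hypothetical; flush_dcache_range() is the generic
 * U-Boot cache-maintenance call declared in cpu_func.h:
 *
 *	if (dcache_status())
 *		flush_dcache_range((ulong)buf, (ulong)buf + len);
 */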