/* SPDX-License-Identifier: GPL-2.0
 * $Id: head.S,v 1.7 2003/09/01 17:58:19 lethal Exp $
 *
 *  arch/sh/kernel/head.S
 *
 *  Copyright (C) 1999, 2000  Niibe Yutaka & Kaz Kojima
 *  Copyright (C) 2010  Matt Fleming
 *
 * Head.S contains the SH exception handlers and startup code.
 */
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/mmu.h>
#include <cpu/mmu_context.h>

#ifdef CONFIG_CPU_SH4A
#define SYNCO()		synco

#define PREFI(label, reg)	\
	mov.l	label, reg;	\
	prefi	@reg
#else
#define SYNCO()
#define PREFI(label, reg)
#endif

	.section	.empty_zero_page, "aw"
ENTRY(empty_zero_page)
	.long	1		/* MOUNT_ROOT_RDONLY */
	.long	0		/* RAMDISK_FLAGS */
	.long	0x0200		/* ORIG_ROOT_DEV */
	.long	1		/* LOADER_TYPE */
	.long	0x00000000	/* INITRD_START */
	.long	0x00000000	/* INITRD_SIZE */
#ifdef CONFIG_32BIT
	.long	0x53453f00 + 32	/* "SE?" = 32 bit */
#else
	.long	0x53453f00 + 29	/* "SE?" = 29 bit */
#endif
1:
	.skip	PAGE_SIZE - empty_zero_page - 1b

	__HEAD

/*
 * Condition at the entry of _stext:
 *
 *   BSC has already been initialized.
 *   INTC may or may not be initialized.
 *   VBR may or may not be initialized.
 *   MMU may or may not be initialized.
 *   Cache may or may not be initialized.
 *   Hardware (including on-chip modules) may or may not be initialized.
 *
 */
ENTRY(_stext)
	!			Initialize Status Register
	mov.l	1f, r0		! MD=1, RB=0, BL=0, Interrupt Mask
	ldc	r0, sr
	!			Initialize global interrupt mask
#ifdef CONFIG_CPU_HAS_SR_RB
	mov	#0, r0
	ldc	r0, r6_bank
#endif

#ifdef CONFIG_OF_EARLY_FLATTREE
	mov	r4, r12		! Store device tree blob pointer in r12
#endif

	/*
	 * Prefetch if possible to reduce cache miss penalty.
	 *
	 * We do this early on for SH-4A as a micro-optimization,
	 * as later on we will have speculative execution enabled
	 * and this will become less of an issue.
	 */
	PREFI(5f, r0)
	PREFI(6f, r0)

	!
	mov.l	2f, r0
	mov	r0, r15		! Set initial r15 (stack pointer)
#ifdef CONFIG_CPU_HAS_SR_RB
	mov.l	7f, r0
	ldc	r0, r7_bank	! ... and initial thread_info
#endif

#ifdef CONFIG_PMB
/*
 * Reconfigure the initial PMB mappings setup by the hardware.
 *
 * When we boot in 32-bit MMU mode there are 2 PMB entries already
 * setup for us.
 *
 * Entry       VPN	   PPN	    V	SZ	C	UB	WT
 * ---------------------------------------------------------------
 *   0	    0x80000000 0x00000000   1  512MB	1	0	1
 *   1	    0xA0000000 0x00000000   1  512MB	0	0	0
 *
 * But we reprogram them here because we want complete control over
 * our address space and the initial mappings may not map PAGE_OFFSET
 * to __MEMORY_START (or even map all of our RAM).
 *
 * Once we've setup cached and uncached mappings we clear the rest of the
 * PMB entries. This clearing also deals with the fact that PMB entries
 * can persist across reboots. The PMB could have been left in any state
 * when the reboot occurred, so to be safe we clear all entries and start
 * with a clean slate.
 *
 * The uncached mapping is constructed using the smallest possible
 * mapping with a single unbufferable page. Only the kernel text needs to
 * be covered via the uncached mapping so that certain functions can be
 * run uncached.
 *
 * Drivers and the like that have previously abused the 1:1 identity
 * mapping are unsupported in 32-bit mode and must specify their caching
 * preference when page tables are constructed.
 *
 * This frees up the P2 space for more nefarious purposes.
 *
 * Register utilization is as follows:
 *
 *	r0 = PMB_DATA data field
 *	r1 = PMB_DATA address field
 *	r2 = PMB_ADDR data field
 *	r3 = PMB_ADDR address field
 *	r4 = PMB_E_SHIFT
 *	r5 = remaining amount of RAM to map
 *	r6 = PMB mapping size we're trying to use
 *	r7 = cached_to_uncached
 *	r8 = scratch register
 *	r9 = scratch register
 *	r10 = number of PMB entries we've setup
 *	r11 = scratch register
 */

	mov.l	.LMMUCR, r1	/* Flush the TLB */
	mov.l	@r1, r0
	or	#MMUCR_TI, r0
	mov.l	r0, @r1

	mov.l	.LMEMORY_SIZE, r5

	mov	#PMB_E_SHIFT, r0
	mov	#0x1, r4
	shld	r0, r4

	mov.l	.LFIRST_DATA_ENTRY, r0
	mov.l	.LPMB_DATA, r1
	mov.l	.LFIRST_ADDR_ENTRY, r2
	mov.l	.LPMB_ADDR, r3

	/*
	 * First we need to walk the PMB and figure out if there are any
	 * existing mappings that match the initial mappings VPN/PPN.
	 * If these have already been established by the bootloader, we
	 * don't bother setting up new entries here, and let the late PMB
	 * initialization take care of things instead.
	 *
	 * Note that we may need to coalesce and merge entries in order
	 * to reclaim more available PMB slots, which is much more than
	 * we want to do at this early stage.
	 */
	mov	#0, r10
	mov	#NR_PMB_ENTRIES, r9

	mov	r1, r7		/* temporary PMB_DATA iterator */

.Lvalidate_existing_mappings:

	mov.l	.LPMB_DATA_MASK, r11
	mov.l	@r7, r8
	and	r11, r8
	cmp/eq	r0, r8		/* Check for valid __MEMORY_START mappings */
	bt	.Lpmb_done

	add	#1, r10		/* Increment the loop counter */
	cmp/eq	r9, r10
	bf/s	.Lvalidate_existing_mappings
	 add	r4, r7		/* Increment to the next PMB_DATA entry */

	/*
	 * If we've fallen through, continue with setting up the initial
	 * mappings.
	 */

	mov	r5, r7		/* cached_to_uncached */
	mov	#0, r10

#ifdef CONFIG_UNCACHED_MAPPING
	/*
	 * Uncached mapping
	 */
	mov	#(PMB_SZ_16M >> 2), r9
	shll2	r9

	mov	#(PMB_UB >> 8), r8
	shll8	r8

	or	r0, r8
	or	r9, r8
	mov.l	r8, @r1
	mov	r2, r8
	add	r7, r8
	mov.l	r8, @r3

	add	r4, r1
	add	r4, r3
	add	#1, r10
#endif

/*
 * Iterate over all of the available sizes from largest to
 * smallest for constructing the cached mapping.
 */
#define __PMB_ITER_BY_SIZE(size)			\
.L##size:						\
	mov	#(size >> 4), r6;			\
	shll16	r6;					\
	shll8	r6;					\
							\
	cmp/hi	r5, r6;					\
	bt	9999f;					\
							\
	mov	#(PMB_SZ_##size##M >> 2), r9;		\
	shll2	r9;					\
							\
	/*						\
	 * Cached mapping				\
	 */						\
	mov	#PMB_C, r8;				\
	or	r0, r8;					\
	or	r9, r8;					\
	mov.l	r8, @r1;				\
	mov.l	r2, @r3;				\
							\
	/* Increment to the next PMB_DATA entry */	\
	add	r4, r1;					\
	/* Increment to the next PMB_ADDR entry */	\
	add	r4, r3;					\
	/* Increment number of PMB entries */		\
	add	#1, r10;				\
							\
	sub	r6, r5;					\
	add	r6, r0;					\
	add	r6, r2;					\
							\
	bra	.L##size;				\
							\
9999:

	__PMB_ITER_BY_SIZE(512)
	__PMB_ITER_BY_SIZE(128)
	__PMB_ITER_BY_SIZE(64)
	__PMB_ITER_BY_SIZE(16)

#ifdef CONFIG_UNCACHED_MAPPING
	/*
	 * Now that we can access it, update cached_to_uncached and
	 * uncached_size.
	 */
	mov.l	.Lcached_to_uncached, r0
	mov.l	r7, @r0

	mov.l	.Luncached_size, r0
	mov	#1, r7
	shll16	r7
	shll8	r7
	mov.l	r7, @r0
#endif

	/*
	 * Clear the remaining PMB entries.
	 *
	 * r3 = entry to begin clearing from
	 * r10 = number of entries we've setup so far
	 */
	mov	#0, r1
	mov	#NR_PMB_ENTRIES, r0

.Lagain:
	mov.l	r1, @r3		/* Clear PMB_ADDR entry */
	add	#1, r10		/* Increment the loop counter */
	cmp/eq	r0, r10
	bf/s	.Lagain
	 add	r4, r3		/* Increment to the next PMB_ADDR entry */

	mov.l	6f, r0
	icbi	@r0

.Lpmb_done:
#endif /* CONFIG_PMB */

#ifndef CONFIG_SH_NO_BSS_INIT
	/*
	 * Don't clear BSS if running on slow platforms such as an RTL
	 * simulation, remote memory via SHdebug link, etc.  For these
	 * the memory can be guaranteed to be all zero on boot anyway.
	 */
				! Clear BSS area
#ifdef CONFIG_SMP
	mov.l	3f, r0
	cmp/eq	#0, r0		! skip clear if set to zero
	bt	10f
#endif

	mov.l	3f, r1
	add	#4, r1
	mov.l	4f, r2
	mov	#0, r0
9:	cmp/hs	r2, r1
	bf/s	9b		! while (r1 < r2)
	 mov.l	r0,@-r2

10:
#endif

#ifdef CONFIG_OF_EARLY_FLATTREE
	mov.l	8f, r0		! Make flat device tree available early.
	jsr	@r0
	 mov	r12, r4
#endif

	!			Additional CPU initialization
	mov.l	6f, r0
	jsr	@r0
	 nop

	SYNCO()			! Wait for pending instructions..

	!			Start kernel
	mov.l	5f, r0
	jmp	@r0
	 nop

	.balign 4
#if defined(CONFIG_CPU_SH2)
1:	.long	0x000000F0		! IMASK=0xF
#else
1:	.long	0x500080F0		! MD=1, RB=0, BL=1, FD=1, IMASK=0xF
#endif
ENTRY(stack_start)
2:	.long	init_thread_union+THREAD_SIZE
3:	.long	__bss_start
4:	.long	_end
5:	.long	start_kernel
6:	.long	cpu_init
7:	.long	init_thread_union
#if defined(CONFIG_OF_EARLY_FLATTREE)
8:	.long	sh_fdt_init
#endif

#ifdef CONFIG_PMB
.LPMB_ADDR:		.long	PMB_ADDR
.LPMB_DATA:		.long	PMB_DATA
.LPMB_DATA_MASK:	.long	PMB_PFN_MASK | PMB_V
.LFIRST_ADDR_ENTRY:	.long	PAGE_OFFSET | PMB_V
.LFIRST_DATA_ENTRY:	.long	__MEMORY_START | PMB_V
.LMMUCR:		.long	MMUCR
.LMEMORY_SIZE:		.long	__MEMORY_SIZE
#ifdef CONFIG_UNCACHED_MAPPING
.Lcached_to_uncached:	.long	cached_to_uncached
.Luncached_size:	.long	uncached_size
#endif
#endif
Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.