/* start.S */
/*
 * Copyright (C) 1998 Dan Malek <dmalek@jlc.net>
 * Copyright (C) 1999 Magnus Damm <kieraypc01.p.y.kie.era.ericsson.se>
 * Copyright (C) 2000-2009 Wolfgang Denk <wd@denx.de>
 * Copyright Freescale Semiconductor, Inc. 2004, 2006.
 *
 * SPDX-License-Identifier: GPL-2.0+
 *
 * Based on the MPC83xx code.
 */

/*
 * U-Boot - Startup Code for MPC512x based Embedded Boards
 */
#include <asm-offsets.h>
#include <config.h>
#include <version.h>
#define CONFIG_521X	1		/* needed for Linux kernel header files*/
#include <asm/immap_512x.h>
#include "asm-offsets.h"

#include <ppc_asm.tmpl>
#include <ppc_defs.h>
#include <asm/cache.h>
#include <asm/mmu.h>
#include <asm/u-boot.h>

/*
 * MSR value used when transferring to exception handlers:
 * Floating Point enable, Machine Check and Recoverable Interr.
 * In DEBUG builds Machine Check is left off so an attached
 * debugger can take the condition instead.
 */
#undef	MSR_KERNEL
#ifdef DEBUG
#define MSR_KERNEL (MSR_FP|MSR_RI)
#else
#define MSR_KERNEL (MSR_FP|MSR_ME|MSR_RI)
#endif

/*
 * Macros for manipulating CSx_START/STOP: the chip-select /
 * local access window registers hold only the upper 16 bits
 * of the 32-bit start/stop addresses.
 */
#define START_REG(start)	((start) >> 16)
#define STOP_REG(start, size)	(((start) + (size) - 1) >> 16)
/*
 * Set up GOT: Global Offset Table
 *
 * Use r12 to access the GOT.  These entries let the position-
 * independent startup code locate its own symbols both before and
 * after relocation to RAM.
 */
	START_GOT
	GOT_ENTRY(_GOT2_TABLE_)
	GOT_ENTRY(_FIXUP_TABLE_)

	GOT_ENTRY(_start)
	GOT_ENTRY(_start_of_vectors)
	GOT_ENTRY(_end_of_vectors)
	GOT_ENTRY(transfer_to_handler)

	GOT_ENTRY(__init_end)
	GOT_ENTRY(__bss_end)
	GOT_ENTRY(__bss_start)
	END_GOT
/*
 * Magic number and version string
 */
	.long	0x27051956		/* U-Boot Magic Number */

	.globl	version_string
version_string:
	.ascii	U_BOOT_VERSION_STRING, "\0"
/*
 * Vector Table
 *
 * The reset vector sits at EXC_OFF_SYS_RESET; the remaining exception
 * vectors follow at their architected offsets.  Each STD_EXCEPTION
 * expands to a prolog that saves state and transfers control to the
 * named C handler via transfer_to_handler.
 */
	.text
	. = EXC_OFF_SYS_RESET

	.globl	_start
	/* Start from here after reset/power on */
_start:
	b	boot_cold

	.globl	_start_of_vectors
_start_of_vectors:

	/* Machine check */
	STD_EXCEPTION(0x200, MachineCheck, MachineCheckException)

	/* Data Storage exception. */
	STD_EXCEPTION(0x300, DataStorage, UnknownException)

	/* Instruction Storage exception. */
	STD_EXCEPTION(0x400, InstStorage, UnknownException)

	/* External Interrupt exception. */
	STD_EXCEPTION(0x500, ExtInterrupt, UnknownException)

	/*
	 * Alignment exception: open-coded (not STD_EXCEPTION) because
	 * the handler also needs DAR/DSISR — the faulting address and
	 * the cause of the fault — saved into the trap frame.
	 */
	. = 0x600
Alignment:
	EXCEPTION_PROLOG(SRR0, SRR1)
	mfspr	r4,DAR
	stw	r4,_DAR(r21)
	mfspr	r5,DSISR
	stw	r5,_DSISR(r21)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_TEMPLATE(Alignment, AlignmentException, MSR_KERNEL, COPY_EE)

	/* Program check exception */
	. = 0x700
ProgramCheck:
	EXCEPTION_PROLOG(SRR0, SRR1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_TEMPLATE(ProgramCheck, ProgramCheckException,
		MSR_KERNEL, COPY_EE)

	/* Floating Point Unit unavailable exception */
	STD_EXCEPTION(0x800, FPUnavailable, UnknownException)

	/* Decrementer */
	STD_EXCEPTION(0x900, Decrementer, timer_interrupt)

	/* Critical interrupt */
	STD_EXCEPTION(0xa00, Critical, UnknownException)

	/* System Call */
	STD_EXCEPTION(0xc00, SystemCall, UnknownException)

	/* Trace interrupt */
	STD_EXCEPTION(0xd00, Trace, UnknownException)

	/* Performance Monitor interrupt */
	STD_EXCEPTION(0xf00, PerfMon, UnknownException)

	/* Instruction Translation Miss */
	STD_EXCEPTION(0x1000, InstructionTLBMiss, UnknownException)

	/* Data Load Translation Miss */
	STD_EXCEPTION(0x1100, DataLoadTLBMiss, UnknownException)

	/* Data Store Translation Miss */
	STD_EXCEPTION(0x1200, DataStoreTLBMiss, UnknownException)

	/* Instruction Address Breakpoint */
	STD_EXCEPTION(0x1300, InstructionAddrBreakpoint, DebugException)

	/* System Management interrupt */
	STD_EXCEPTION(0x1400, SystemMgmtInterrupt, UnknownException)

	.globl	_end_of_vectors
_end_of_vectors:
	. = 0x3000

/*
 * boot_cold: cold-boot entry point, reached from the reset vector.
 *
 * Runs from flash with no RAM available yet.  Moves the IMMR block
 * to its configured location, runs early CPU init, sets up local
 * access windows for the boot flash and the on-chip SRAM (initial
 * stack), then jumps to in_flash at the link address
 * CONFIG_SYS_MONITOR_BASE.
 *
 *   r5 = saved original MSR (consumed by cpu_early_init)
 */
boot_cold:
	/* Save msr contents */
	mfmsr	r5

	/* Set IMMR area to our preferred location */
	lis	r4, CONFIG_DEFAULT_IMMR@h
	lis	r3, CONFIG_SYS_IMMR@h
	ori	r3, r3, CONFIG_SYS_IMMR@l
	stw	r3, IMMRBAR(r4)
	mtspr	MBAR, r3		/* IMMRBAR is mirrored into the MBAR SPR (311) */

	/* Initialise the machine */
	bl	cpu_early_init

	/*
	 * Set up Local Access Windows:
	 *
	 * 1) Boot/CS0 (boot FLASH)
	 * 2) On-chip SRAM (initial stack purposes)
	 */

	/* Boot CS/CS0 window range */
	lis	r3, CONFIG_SYS_IMMR@h
	ori	r3, r3, CONFIG_SYS_IMMR@l

	lis	r4, START_REG(CONFIG_SYS_FLASH_BASE)
	ori	r4, r4, STOP_REG(CONFIG_SYS_FLASH_BASE, CONFIG_SYS_FLASH_SIZE)
	stw	r4, LPCS0AW(r3)

	/*
	 * The SRAM window has a fixed size (256K), so only the start address
	 * is necessary
	 */
	lis	r4, START_REG(CONFIG_SYS_SRAM_BASE) & 0xff00
	stw	r4, SRAMBAR(r3)

	/*
	 * According to MPC5121e RM, configuring local access windows should
	 * be followed by a dummy read of the config register that was
	 * modified last and an isync
	 */
	lwz	r4, SRAMBAR(r3)
	isync

	/*
	 * Set configuration of the Boot/CS0, the SRAM window does not have a
	 * config register so no params can be set for it
	 */
	lis	r3, (CONFIG_SYS_IMMR + LPC_OFFSET)@h
	ori	r3, r3, (CONFIG_SYS_IMMR + LPC_OFFSET)@l

	lis	r4, CONFIG_SYS_CS0_CFG@h
	ori	r4, r4, CONFIG_SYS_CS0_CFG@l
	stw	r4, CS0_CONFIG(r3)

	/* Master enable all CS's */
	lis	r4, CS_CTRL_ME@h
	ori	r4, r4, CS_CTRL_ME@l
	stw	r4, CS_CTRL(r3)

	/*
	 * Branch (rather than fall through) to in_flash at the link
	 * address: we may be executing from an aliased flash mapping,
	 * so compute the target from CONFIG_SYS_MONITOR_BASE.
	 */
	lis	r4, (CONFIG_SYS_MONITOR_BASE)@h
	ori	r4, r4, (CONFIG_SYS_MONITOR_BASE)@l
	addi	r5, r4, in_flash - _start + EXC_OFF_SYS_RESET
	mtlr	r5
	blr
/*
 * in_flash: now executing at the link address.
 *
 * Sets up the initial stack in the on-chip SRAM window, initializes
 * GOT access and enters the C world; board_init_f() does not return
 * here (it ends by calling relocate_code).
 */
in_flash:
	/* Initial stack lives in the on-chip SRAM init-RAM area */
	lis	r1, (CONFIG_SYS_INIT_RAM_ADDR + CONFIG_SYS_GBL_DATA_OFFSET)@h
	ori	r1, r1, (CONFIG_SYS_INIT_RAM_ADDR + CONFIG_SYS_GBL_DATA_OFFSET)@l

	li	r0, 0			/* Make room for stack frame header and */
	stwu	r0, -4(r1)		/* clear final stack frame so that */
	stwu	r0, -4(r1)		/* stack backtraces terminate cleanly */

	/* let the C-code set up the rest */
	/*                                */
	/* Be careful to keep code relocatable & stack humble */
	/*------------------------------------------------------*/
	GET_GOT				/* initialize GOT access */

	/* r3: IMMR */
	lis	r3, CONFIG_SYS_IMMR@h

	/* run low-level CPU init code (in Flash) */
	bl	cpu_init_f

	/* run 1st part of board init code (in Flash) */
	bl	board_init_f

	/* NOTREACHED - board_init_f() does not return */
/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception.
 * Register r21 is pointer into trap frame, r1 has new stack pointer.
 *
 * On entry (established by the EXCEPTION_PROLOG / EXC_XFER_TEMPLATE
 * macros — NOTE(review): contract inferred from usage, confirm
 * against ppc_asm.tmpl):
 *   r20 = MSR value to resume the handler with (goes into SRR1)
 *   r22 = faulting NIP (saved SRR0)
 *   r23 = interrupted MSR
 *   lr  = points at a two-word table: handler address, return address
 */
	.globl	transfer_to_handler
transfer_to_handler:
	stw	r22,_NIP(r21)		/* record faulting PC in the frame */
	lis	r22,MSR_POW@h
	andc	r23,r23,r22		/* clear POW (no power-save state in frame) */
	stw	r23,_MSR(r21)
	SAVE_GPR(7, r21)
	SAVE_4GPRS(8, r21)
	SAVE_8GPRS(12, r21)
	SAVE_8GPRS(24, r21)
	mflr	r23
	andi.	r24,r23,0x3f00		/* get vector offset */
	stw	r24,TRAP(r21)
	li	r22,0
	stw	r22,RESULT(r21)
	lwz	r24,0(r23)		/* virtual address of handler */
	lwz	r23,4(r23)		/* where to go when done */
	mtspr	SRR0,r24
	mtspr	SRR1,r20
	mtlr	r23
	SYNC
	rfi				/* jump to handler, enable MMU */
/*
 * int_return: common exception-return path.
 *
 * Disables external interrupts, restores the complete register state
 * from the exception frame addressed by r1, then returns to the
 * interrupted context via rfi.
 */
int_return:
	mfmsr	r28			/* Disable interrupts */
	li	r4,0
	ori	r4,r4,MSR_EE
	andc	r28,r28,r4
	SYNC				/* Some chip revs need this... */
	mtmsr	r28
	SYNC
	lwz	r2,_CTR(r1)
	lwz	r0,_LINK(r1)
	mtctr	r2
	mtlr	r0
	lwz	r2,_XER(r1)
	lwz	r0,_CCR(r1)
	mtspr	XER,r2
	mtcrf	0xFF,r0
	REST_10GPRS(3, r1)
	REST_10GPRS(13, r1)
	REST_8GPRS(23, r1)
	REST_GPR(31, r1)
	lwz	r2,_NIP(r1)		/* Restore environment */
	lwz	r0,_MSR(r1)
	mtspr	SRR0,r2
	mtspr	SRR1,r0
	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)		/* restore stack pointer last — frame is gone after this */
	SYNC
	rfi
/*
 * cpu_early_init: initialise the machine.
 *
 * Expects the original (pre-boot) MSR contents in r5.  Programs the
 * MSR from MSR_KERNEL (preserving the IP bit, and SE/BE in DEBUG
 * builds), configures or disables the software watchdog, then loads
 * the HID0/HID2 implementation registers (which include the cache
 * controls).  Clobbers r3 and r4.
 */
cpu_early_init:
	/* Initialize machine status; enable machine check interrupt */
	/*-----------------------------------------------------------*/
	li	r3, MSR_KERNEL			/* Set ME and RI flags */
	rlwimi	r3, r5, 0, 25, 25		/* preserve IP bit */
#ifdef DEBUG
	rlwimi	r3, r5, 0, 21, 22		/* debugger might set SE, BE bits */
#endif
	mtmsr	r3
	SYNC
	mtspr	SRR1, r3			/* Mirror current MSR state in SRR1 */

	lis	r3, CONFIG_SYS_IMMR@h

#if defined(CONFIG_WATCHDOG)
	/* Initialise the watchdog and reset it */
	/*--------------------------------------*/
	lis	r4, CONFIG_SYS_WATCHDOG_VALUE
	ori	r4, r4, (SWCRR_SWEN | SWCRR_SWRI | SWCRR_SWPR)
	stw	r4, SWCRR(r3)

	/* reset the watchdog via the 0x556C / 0xAA39 service sequence */
	li	r4, 0x556C
	sth	r4, SWSRR@l(r3)
	li	r4, 0x0
	ori	r4, r4, 0xAA39
	sth	r4, SWSRR@l(r3)
#else
	/* Disable the watchdog */
	/*----------------------*/
	lwz	r4, SWCRR(r3)
	/*
	 * Check to see if it's enabled for disabling: once disabled by s/w
	 * it's not possible to re-enable it
	 */
	andi.	r4, r4, 0x4
	beq	1f
	xor	r4, r4, r4
	stw	r4, SWCRR(r3)
1:
#endif /* CONFIG_WATCHDOG */

	/* Initialize the Hardware Implementation-dependent Registers */
	/* HID0 also contains cache control */
	/*------------------------------------------------------*/
	lis	r3, CONFIG_SYS_HID0_INIT@h
	ori	r3, r3, CONFIG_SYS_HID0_INIT@l
	SYNC
	mtspr	HID0, r3

	lis	r3, CONFIG_SYS_HID0_FINAL@h
	ori	r3, r3, CONFIG_SYS_HID0_FINAL@l
	SYNC
	mtspr	HID0, r3

	lis	r3, CONFIG_SYS_HID2@h
	ori	r3, r3, CONFIG_SYS_HID2@l
	SYNC
	mtspr	HID2, r3
	sync
	blr
/* Cache functions.
 *
 * Note: requires that all cache bits in
 * HID0 are in the low half word.
 */

/*
 * icache_enable: enable the instruction cache.
 * Clears ILOCK, then writes HID0 with ICE plus a one-shot ICFI
 * (flash invalidate) and immediately re-writes it without ICFI.
 * Clobbers r3, r4.
 */
	.globl	icache_enable
icache_enable:
	mfspr	r3, HID0
	ori	r3, r3, HID0_ICE
	lis	r4, 0
	ori	r4, r4, HID0_ILOCK
	andc	r3, r3, r4
	ori	r4, r3, HID0_ICFI
	isync
	mtspr	HID0, r4		/* sets enable and invalidate, clears lock */
	isync
	mtspr	HID0, r3		/* clears invalidate */
	blr
/*
 * icache_disable: disable the instruction cache.
 * Clears ICE and ILOCK; pulses ICFI to invalidate the contents.
 * Clobbers r3, r4.
 */
	.globl	icache_disable
icache_disable:
	mfspr	r3, HID0
	lis	r4, 0
	ori	r4, r4, HID0_ICE|HID0_ILOCK
	andc	r3, r3, r4
	ori	r4, r3, HID0_ICFI
	isync
	mtspr	HID0, r4		/* sets invalidate, clears enable and lock*/
	isync
	mtspr	HID0, r3		/* clears invalidate */
	blr
/*
 * icache_status: return 1 in r3 if the instruction cache is
 * enabled (HID0[ICE] set), 0 otherwise.
 */
	.globl	icache_status
icache_status:
	mfspr	r3, HID0
	rlwinm	r3, r3, (31 - HID0_ICE_SHIFT + 1), 31, 31	/* isolate ICE into bit 0 */
	blr
/*
 * dcache_enable: enable the data cache.
 * Clears DLOCK, then writes HID0 with DCE plus a one-shot DCFI
 * (flash invalidate) before re-writing it without DCFI.
 * Clobbers r3, r5.
 */
	.globl	dcache_enable
dcache_enable:
	mfspr	r3, HID0
	li	r5, HID0_DCFI|HID0_DLOCK
	andc	r3, r3, r5
	mtspr	HID0, r3		/* no invalidate, unlock */
	ori	r3, r3, HID0_DCE
	ori	r5, r3, HID0_DCFI
	mtspr	HID0, r5		/* enable + invalidate */
	mtspr	HID0, r3		/* enable */
	sync
	blr
/*
 * dcache_disable: disable the data cache.
 * Clears DCE and DLOCK; pulses DCI to invalidate the contents.
 * Clobbers r3, r4.
 */
	.globl	dcache_disable
dcache_disable:
	mfspr	r3, HID0
	lis	r4, 0
	ori	r4, r4, HID0_DCE|HID0_DLOCK
	andc	r3, r3, r4
	ori	r4, r3, HID0_DCI
	sync
	mtspr	HID0, r4		/* sets invalidate, clears enable and lock */
	sync
	mtspr	HID0, r3		/* clears invalidate */
	blr
/*
 * dcache_status: return 1 in r3 if the data cache is enabled
 * (HID0[DCE] set), 0 otherwise.
 */
	.globl	dcache_status
dcache_status:
	mfspr	r3, HID0
	rlwinm	r3, r3, (31 - HID0_DCE_SHIFT + 1), 31, 31	/* isolate DCE into bit 0 */
	blr
/* get_pvr: return the Processor Version Register (PVR) in r3 */
	.globl	get_pvr
get_pvr:
	mfspr	r3, PVR
	blr

/* get_svr: return the System Version Register (SVR) in r3 */
	.globl	get_svr
get_svr:
	mfspr	r3, SVR
	blr
/*-------------------------------------------------------------------*/

/*
 * void relocate_code (addr_sp, gd, addr_moni)
 *
 * This "function" does not return, instead it continues in RAM
 * after relocating the monitor code.
 *
 * On entry (C calling convention):
 *   r3 = addr_sp   (new stack pointer in RAM)
 *   r4 = gd        (global data pointer)
 *   r5 = addr_moni (destination address of the monitor)
 *
 * Registers used by the copy / flush loops below:
 *   r3 = dest
 *   r4 = src
 *   r5 = length in bytes
 *   r6 = cachelinesize
 */
	.globl	relocate_code
relocate_code:
	mr	r1,  r3		/* Set new stack pointer */
	mr	r9,  r4		/* Save copy of Global Data pointer */
	mr	r10, r5		/* Save copy of Destination Address */

	GET_GOT
	mr	r3, r5				/* Destination Address */
	lis	r4, CONFIG_SYS_MONITOR_BASE@h	/* Source Address */
	ori	r4, r4, CONFIG_SYS_MONITOR_BASE@l
	lwz	r5, GOT(__init_end)
	sub	r5, r5, r4			/* length = __init_end - base */
	li	r6, CONFIG_SYS_CACHELINE_SIZE	/* Cache Line Size */

	/*
	 * Fix GOT pointer:
	 *
	 * New GOT-PTR = (old GOT-PTR - CONFIG_SYS_MONITOR_BASE)
	 *		 + Destination Address
	 *
	 * Offset:
	 */
	sub	r15, r10, r4

	/* First our own GOT */
	add	r12, r12, r15
	/* then the one used by the C code */
	add	r30, r30, r15

	/*
	 * Now relocate code
	 */
	cmplw	cr1,r3,r4
	addi	r0,r5,3
	srwi.	r0,r0,2			/* word count, rounded up */
	beq	cr1,4f			/* In place copy is not necessary */
	beq	7f			/* Protect against 0 count */
	mtctr	r0
	bge	cr1,2f			/* dest above src: copy backwards */
	la	r8,-4(r4)
	la	r7,-4(r3)

	/* forward copy (dest below src) */
1:	lwzu	r0,4(r8)
	stwu	r0,4(r7)
	bdnz	1b

	addi	r0,r5,3
	srwi.	r0,r0,2
	mtctr	r0
	la	r8,-4(r4)
	la	r7,-4(r3)

	/* and compare the copy word-by-word against the source */
20:	lwzu	r20,4(r8)
	lwzu	r21,4(r7)
	xor.	r22, r20, r21
	bne	30f
	bdnz	20b
	b	4f

	/* compare failed: give up, return 0 in r3 */
30:	li	r3, 0
	blr

	/*
	 * Destination overlaps the source from above, so copy in reverse
	 * order: each source word must be read before the forward copy
	 * would have overwritten it.
	 */
2:	slwi	r0,r0,2			/* byte count from word count */
	add	r8,r4,r0
	add	r7,r3,r0
3:	lwzu	r0,-4(r8)
	stwu	r0,-4(r7)
	bdnz	3b

	/*
	 * Now flush the cache: note that we must start from a cache aligned
	 * address. Otherwise we might miss one cache line.
	 */
4:	cmpwi	r6,0
	add	r5,r3,r5
	beq	7f			/* Always flush prefetch queue in any case */
	subi	r0,r6,1
	andc	r3,r3,r0		/* round dest down to a cache line */
	mr	r4,r3
5:	dcbst	0,r4
	add	r4,r4,r6
	cmplw	r4,r5
	blt	5b
	sync				/* Wait for all dcbst to complete on bus */
	mr	r4,r3
6:	icbi	0,r4
	add	r4,r4,r6
	cmplw	r4,r5
	blt	6b
7:	sync				/* Wait for all icbi to complete on bus */
	isync

	/*
	 * We are done. Do not return, instead branch to second part of board
	 * initialization, now running from RAM.
	 */
	addi	r0, r10, in_ram - _start + EXC_OFF_SYS_RESET
	mtlr	r0
	blr
in_ram:
	/*
	 * Relocation Function, r12 point to got2+0x8000
	 *
	 * Adjust got2 pointers, no need to check for 0, this code
	 * already puts a few entries in the table.
	 */
	li	r0,__got2_entries@sectoff@l
	la	r3,GOT(_GOT2_TABLE_)
	lwz	r11,GOT(_GOT2_TABLE_)
	mtctr	r0
	sub	r11,r3,r11		/* r11 = relocation offset */
	addi	r3,r3,-4
1:	lwzu	r0,4(r3)
	cmpwi	r0,0			/* skip NULL entries */
	beq-	2f
	add	r0,r0,r11
	stw	r0,0(r3)
2:	bdnz	1b

	/*
	 * Now adjust the fixups and the pointers to the fixups
	 * in case we need to move ourselves again.
	 */
	li	r0,__fixup_entries@sectoff@l
	lwz	r3,GOT(_FIXUP_TABLE_)
	cmpwi	r0,0
	mtctr	r0
	addi	r3,r3,-4
	beq	4f			/* no fixup entries at all */
3:	lwzu	r4,4(r3)
	lwzux	r0,r4,r11
	cmpwi	r0,0
	add	r0,r0,r11
	stw	r4,0(r3)
	beq-	5f			/* don't relocate NULL targets */
	stw	r0,0(r4)
5:	bdnz	3b
4:
clear_bss:
	/*
	 * Now clear BSS segment
	 */
	lwz	r3,GOT(__bss_start)
	lwz	r4,GOT(__bss_end)

	cmplw	0, r3, r4
	beq	6f			/* empty BSS: nothing to clear */

	li	r0, 0
5:
	stw	r0, 0(r3)
	addi	r3, r3, 4
	cmplw	0, r3, r4
	bne	5b
6:
	mr	r3, r9		/* Global Data pointer */
	mr	r4, r10		/* Destination Address */
	bl	board_init_r
/*
 * Copy exception vector code to low memory
 *
 * r3: dest_addr
 * r7: source address, r8: end address, r9: target address
 *
 * After the copy, the handler/return-address words embedded in each
 * relocated vector are patched via trap_reloc, and MSR[IP] is cleared
 * so exceptions are taken from low memory.
 */
	.globl	trap_init
trap_init:
	mflr	r4			/* save link register */
	GET_GOT
	lwz	r7, GOT(_start)
	lwz	r8, GOT(_end_of_vectors)

	li	r9, 0x100		/* reset vector at 0x100 */

	cmplw	0, r7, r8
	bgelr				/* return if r7>=r8 - just in case */
1:
	lwz	r0, 0(r7)
	stw	r0, 0(r9)
	addi	r7, r7, 4
	addi	r9, r9, 4
	cmplw	0, r7, r8
	bne	1b

	/*
	 * relocate `hdlr' and `int_return' entries
	 */
	li	r7, .L_MachineCheck - _start + EXC_OFF_SYS_RESET
	li	r8, Alignment - _start + EXC_OFF_SYS_RESET
2:
	bl	trap_reloc
	addi	r7, r7, 0x100		/* next exception vector */
	cmplw	0, r7, r8
	blt	2b

	/* Alignment and ProgramCheck have custom prologs — patch individually */
	li	r7, .L_Alignment - _start + EXC_OFF_SYS_RESET
	bl	trap_reloc

	li	r7, .L_ProgramCheck - _start + EXC_OFF_SYS_RESET
	bl	trap_reloc

	li	r7, .L_FPUnavailable - _start + EXC_OFF_SYS_RESET
	li	r8, SystemCall - _start + EXC_OFF_SYS_RESET
3:
	bl	trap_reloc
	addi	r7, r7, 0x100		/* next exception vector */
	cmplw	0, r7, r8
	blt	3b

	li	r7, .L_Trace - _start + EXC_OFF_SYS_RESET
	li	r8, _end_of_vectors - _start + EXC_OFF_SYS_RESET
4:
	bl	trap_reloc
	addi	r7, r7, 0x100		/* next exception vector */
	cmplw	0, r7, r8
	blt	4b

	mfmsr	r3			/* now that the vectors have */
	lis	r7, MSR_IP@h		/* relocated into low memory */
	ori	r7, r7, MSR_IP@l	/* MSR[IP] can be turned off */
	andc	r3, r3, r7		/* (if it was on) */
	SYNC				/* Some chip revs need this... */
	mtmsr	r3
	SYNC

	mtlr	r4			/* restore link register */
	blr