/* release.S */
/*
 * Copyright 2004, 2007, 2008 Freescale Semiconductor.
 * Srikanth Srinivasan <srikanth.srinivaan@freescale.com>
 *
 * SPDX-License-Identifier: GPL-2.0+
 */
#include <config.h>
#include <mpc86xx.h>
#include <ppc_asm.tmpl>
#include <ppc_defs.h>
#include <asm/cache.h>
#include <asm/mmu.h>

/* If this is a multi-cpu system then we need to handle the
 * 2nd cpu.  The assumption is that the 2nd cpu is being
 * held in boot holdoff mode until the 1st cpu unlocks it
 * from Linux.  We'll do some basic cpu init and then pass
 * it to the Linux Reset Vector.
 * Sri: Much of this initialization is not required. Linux
 * rewrites the bats, and the sprs and also enables the L1 cache.
 *
 * Core 0 must copy this to a 1M aligned region and set BPTR
 * to point to it.
 */
	.align 12
	.globl __secondary_start_page
__secondary_start_page:
	.space 0x100		/* space over to reset vector loc */

	/*
	 * Derive this core's processor ID from the MSSCR0[ID] bit
	 * (mask 0x0020): the rotate-left-by-27 moves that bit into
	 * the LSB, yielding 0 or 1, which is written to PIR.
	 */
	mfspr	r0, MSSCR0
	andi.	r0, r0, 0x0020
	rlwinm	r0,r0,27,31,31
	mtspr	PIR, r0

	/* Invalidate BATs: clearing the upper BAT register marks the
	 * whole pair invalid, so only the U halves need writing. */
	li	r0, 0
	mtspr	IBAT0U, r0
	mtspr	IBAT1U, r0
	mtspr	IBAT2U, r0
	mtspr	IBAT3U, r0
	mtspr	IBAT4U, r0
	mtspr	IBAT5U, r0
	mtspr	IBAT6U, r0
	mtspr	IBAT7U, r0
	isync
	mtspr	DBAT0U, r0
	mtspr	DBAT1U, r0
	mtspr	DBAT2U, r0
	mtspr	DBAT3U, r0
	mtspr	DBAT4U, r0
	mtspr	DBAT5U, r0
	mtspr	DBAT6U, r0
	mtspr	DBAT7U, r0
	isync
	sync

	/* enable extended addressing (high BATs, extended block size,
	 * extended addressing) in HID0.
	 * NOTE(review): the mfspr result is immediately overwritten by
	 * the lis below, so pre-existing HID0 bits are discarded here
	 * rather than OR'd in — confirm this is intended. */
	mfspr	r0, HID0
	lis	r0, (HID0_HIGH_BAT_EN | HID0_XBSEN | HID0_XAEN)@h
	ori	r0, r0, (HID0_HIGH_BAT_EN | HID0_XBSEN | HID0_XAEN)@l
	mtspr	HID0, r0
	sync
	isync

#ifdef CONFIG_SYS_L2
	/* init the L2 cache */
	addis	r3, r0, L2_INIT@h
	ori	r3, r3, L2_INIT@l
	sync
	mtspr	l2cr, r3
#ifdef CONFIG_ALTIVEC
	dssall			/* stop all AltiVec data streams first */
#endif
	/* invalidate the L2 cache: if L2CR[L2E] (bit 0) is set, clear
	 * the enable bit before invalidating */
	mfspr	r3, l2cr
	rlwinm.	r3, r3, 0, 0, 0
	beq	1f

	mfspr	r3, l2cr
	rlwinm	r3, r3, 0, 1, 31	/* clear L2E (bit 0), keep the rest */

#ifdef CONFIG_ALTIVEC
	dssall
#endif
	sync
	mtspr	l2cr, r3
	sync

	/* request a global L2 invalidate, then poll until the
	 * hardware clears L2CR[L2I] to signal completion */
1:	mfspr	r3, l2cr
	oris	r3, r3, L2CR_L2I@h
	mtspr	l2cr, r3

invl2:
	mfspr	r3, l2cr
	andis.	r3, r3, L2CR_L2I@h
	bne	invl2
	sync
#endif

	/* enable and invalidate the data cache: clear any pending
	 * flash-invalidate/lock bits, then pulse DCFI with DCE set */
	mfspr	r3, HID0
	li	r5, HID0_DCFI|HID0_DLOCK
	andc	r3, r3, r5
	mtspr	HID0, r3		/* no invalidate, unlock */
	ori	r3, r3, HID0_DCE
	ori	r5, r3, HID0_DCFI
	mtspr	HID0, r5		/* enable + invalidate */
	mtspr	HID0, r3		/* enable */
	sync
#ifdef CONFIG_SYS_L2
	sync
	lis	r3, L2_ENABLE@h
	ori	r3, r3, L2_ENABLE@l
	mtspr	l2cr, r3		/* now turn the L2 back on */
	isync
	sync
#endif

	/* enable and invalidate the instruction cache — same unlock /
	 * pulse-ICFI sequence as the data cache above */
	mfspr	r3, HID0
	li	r5, HID0_ICFI|HID0_ILOCK
	andc	r3, r3, r5
	ori	r3, r3, HID0_ICE
	ori	r5, r3, HID0_ICFI
	mtspr	HID0, r5		/* enable + invalidate */
	mtspr	HID0, r3		/* enable */
	isync
	sync

	/* TBEN in HID0: enable the time base on this core */
	mfspr	r4, HID0
	oris	r4, r4, 0x0400
	mtspr	HID0, r4
	sync
	isync

	/* MCP|SYNCBE|ABE in HID1: machine-check pin, broadcast of
	 * sync/eieio and of dcbf etc. onto the bus */
	mfspr	r4, HID1
	oris	r4, r4, 0x8000
	ori	r4, r4, 0x0C00
	mtspr	HID1, r4
	sync
	isync

	/* Hand this core to the OS: branch to the Linux reset vector
	 * chosen at configuration time */
	lis	r3, CONFIG_LINUX_RESET_VEC@h
	ori	r3, r3, CONFIG_LINUX_RESET_VEC@l
	mtlr	r3
	blr

	/* Never Returns, Running in Linux Now */