/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/irq.h>
#include <asm/memory.h>
#include <asm/ptrace.h>
#include <asm/uaccess.h>
#include <asm/thread_info.h>
#include <asm/asm-uaccess.h>
#include <asm/unistd.h>

/*
 * Context tracking subsystem. Used to instrument transitions
 * between user and kernel mode.
 */
	.macro ct_user_exit, syscall = 0
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_exit
	.if \syscall == 1
	/*
	 * Save/restore needed during syscalls.  Restore syscall arguments from
	 * the values already saved on stack during kernel_entry.
	 */
	ldp	x0, x1, [sp]
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	.endif
#endif
	.endm
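
/*
 * Note: the argument registers x0-x7 are caller-saved under the AAPCS64,
 * so the call to context_tracking_user_exit above may clobber them; hence
 * the reload of x0-x7 from the pt_regs frame built by kernel_entry on the
 * syscall path.
 */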

	.macro ct_user_enter
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_enter
#endif
	.endm

/*
 * Bad Abort numbers
 *-----------------
 */
#define BAD_SYNC	0
#define BAD_IRQ		1
#define BAD_FIQ		2
#define BAD_ERROR	3
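
/*
 * kernel_entry/kernel_exit build and tear down a struct pt_regs frame on
 * the kernel stack: S_FRAME_SIZE is sizeof(struct pt_regs) and the S_*
 * offsets come from asm-offsets.c, so the stp pairs at 16-byte intervals
 * below fill regs[0..29] in order, with lr (x30), sp, pc and pstate
 * stored separately.
 */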
	.macro	kernel_entry, el, regsize = 64
	sub	sp, sp, #S_FRAME_SIZE
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	stp	x0, x1, [sp, #16 * 0]
	stp	x2, x3, [sp, #16 * 1]
	stp	x4, x5, [sp, #16 * 2]
	stp	x6, x7, [sp, #16 * 3]
	stp	x8, x9, [sp, #16 * 4]
	stp	x10, x11, [sp, #16 * 5]
	stp	x12, x13, [sp, #16 * 6]
	stp	x14, x15, [sp, #16 * 7]
	stp	x16, x17, [sp, #16 * 8]
	stp	x18, x19, [sp, #16 * 9]
	stp	x20, x21, [sp, #16 * 10]
	stp	x22, x23, [sp, #16 * 11]
	stp	x24, x25, [sp, #16 * 12]
	stp	x26, x27, [sp, #16 * 13]
	stp	x28, x29, [sp, #16 * 14]

	.if	\el == 0
	mrs	x21, sp_el0
	ldr_this_cpu	tsk, __entry_task, x20	// Ensure MDSCR_EL1.SS is clear,
	ldr	x19, [tsk, #TSK_TI_FLAGS]	// since we can unmask debug
	disable_step_tsk x19, x20		// exceptions when scheduling.

	mov	x29, xzr			// fp pointed to user-space
	.else
	add	x21, sp, #S_FRAME_SIZE
	get_thread_info tsk
	/* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
	ldr	x20, [tsk, #TSK_TI_ADDR_LIMIT]
	str	x20, [sp, #S_ORIG_ADDR_LIMIT]
	mov	x20, #TASK_SIZE_64
	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
	/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
	.endif /* \el == 0 */
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
	 * EL0, there is no need to check the state of TTBR0_EL1 since
	 * accesses are always enabled.
	 * Note that the meaning of this bit differs from the ARMv8.1 PAN
	 * feature as all TTBR0_EL1 accesses are disabled, not just those to
	 * user mappings.
	 */
alternative_if ARM64_HAS_PAN
	b	1f				// skip TTBR0 PAN
alternative_else_nop_endif

	.if	\el != 0
	mrs	x21, ttbr0_el1
	tst	x21, #0xffff << 48		// Check for the reserved ASID
	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
	b.eq	1f				// TTBR0 access already disabled
	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
	.endif

	__uaccess_ttbr0_disable x21
1:
#endif
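
	/*
	 * With CONFIG_ARM64_SW_TTBR0_PAN and no hardware PAN, the idea is
	 * that __uaccess_ttbr0_disable points TTBR0_EL1 at a reserved,
	 * empty page table (using the reserved ASID checked for above), so
	 * stray kernel accesses to user addresses fault; the uaccess
	 * helpers re-enable TTBR0_EL1 only around explicit user accesses.
	 */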

	stp	x22, x23, [sp, #S_PC]

	/*
	 * Set syscallno to -1 by default (overridden later if real syscall).
	 */
	.if	\el == 0
	mvn	x21, xzr
	str	x21, [sp, #S_SYSCALLNO]
	.endif

	/*
	 * Set sp_el0 to current thread_info.
	 */
	.if	\el == 0
	msr	sp_el0, tsk
	.endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm

	.macro	kernel_exit, el
	.if	\el != 0
	/* Restore the task's original addr_limit. */
	ldr	x20, [sp, #S_ORIG_ADDR_LIMIT]
	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]

	/* No need to restore UAO, it will be restored from SPSR_EL1 */
	.endif

	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
	.if	\el == 0
	ct_user_enter
	.endif

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
	 * PAN bit checking.
	 */
alternative_if ARM64_HAS_PAN
	b	2f				// skip TTBR0 PAN
alternative_else_nop_endif

	.if	\el != 0
	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
	.endif

	__uaccess_ttbr0_enable x0

	.if	\el == 0
	/*
	 * Enable errata workarounds only if returning to user. The only
	 * workaround currently required for TTBR0_EL1 changes are for the
	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
	 * corruption).
	 */
	post_ttbr0_update_workaround
	.endif
1:
	.if	\el != 0
	and	x22, x22, #~PSR_PAN_BIT		// ARMv8.0 CPUs do not understand this bit
	.endif
2:
#endif

	.if	\el == 0
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	msr	sp_el0, x23
#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if ARM64_WORKAROUND_845719
	tbz	x22, #4, 1f
#ifdef CONFIG_PID_IN_CONTEXTIDR
	mrs	x29, contextidr_el1
	msr	contextidr_el1, x29
#else
	msr	contextidr_el1, xzr
#endif
1:
alternative_else_nop_endif
#endif
	.endif

	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	ldp	x0, x1, [sp, #16 * 0]
	ldp	x2, x3, [sp, #16 * 1]
	ldp	x4, x5, [sp, #16 * 2]
	ldp	x6, x7, [sp, #16 * 3]
	ldp	x8, x9, [sp, #16 * 4]
	ldp	x10, x11, [sp, #16 * 5]
	ldp	x12, x13, [sp, #16 * 6]
	ldp	x14, x15, [sp, #16 * 7]
	ldp	x16, x17, [sp, #16 * 8]
	ldp	x18, x19, [sp, #16 * 9]
	ldp	x20, x21, [sp, #16 * 10]
	ldp	x22, x23, [sp, #16 * 11]
	ldp	x24, x25, [sp, #16 * 12]
	ldp	x26, x27, [sp, #16 * 13]
	ldp	x28, x29, [sp, #16 * 14]
	ldr	lr, [sp, #S_LR]
	add	sp, sp, #S_FRAME_SIZE		// restore sp
	eret					// return to kernel
	.endm

	.macro	irq_stack_entry
	mov	x19, sp			// preserve the original sp

	/*
	 * Compare sp with the base of the task stack.
	 * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
	 * and should switch to the irq stack.
	 */
	ldr	x25, [tsk, TSK_STACK]
	eor	x25, x25, x19
	and	x25, x25, #~(THREAD_SIZE - 1)
	cbnz	x25, 9998f

	adr_this_cpu x25, irq_stack, x26
	mov	x26, #IRQ_STACK_START_SP
	add	x26, x25, x26

	/* switch to the irq stack */
	mov	sp, x26

	/*
	 * Add a dummy stack frame, this non-standard format is fixed up
	 * by unwind_frame()
	 */
	stp	x29, x19, [sp, #-16]!
	mov	x29, sp

9998:
	.endm

	/*
	 * x19 should be preserved between irq_stack_entry and
	 * irq_stack_exit.
	 */
	.macro	irq_stack_exit
	mov	sp, x19
	.endm
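
/*
 * The cbnz check in irq_stack_entry works because task stacks are
 * THREAD_SIZE aligned: (sp ^ task_stack_base) & ~(THREAD_SIZE - 1) is zero
 * only when sp still lies within the current task's stack, so a nested
 * interrupt that is already running on the irq stack is left where it is.
 */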

/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - x0 to x6.
 *
 * x7 is reserved for the system call number in 32-bit mode.
 */
sc_nr	.req	x25		// number of system calls
scno	.req	x26		// syscall number
stbl	.req	x27		// syscall table pointer
tsk	.req	x28		// current thread_info

/*
 * Interrupt handling.
 */
	.macro	irq_handler
	ldr_l	x1, handle_arch_irq
	mov	x0, sp
	irq_stack_entry
	blr	x1
	irq_stack_exit
	.endm
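
/*
 * handle_arch_irq is a function pointer installed by the interrupt
 * controller driver via set_handle_irq(); it is called here with
 * x0 = struct pt_regs * while running on the per-cpu irq stack.
 */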

	.text

/*
 * Exception vectors.
 */
	.pushsection ".entry.text", "ax"

	.align	11
ENTRY(vectors)
	ventry	el1_sync_invalid		// Synchronous EL1t
	ventry	el1_irq_invalid			// IRQ EL1t
	ventry	el1_fiq_invalid			// FIQ EL1t
	ventry	el1_error_invalid		// Error EL1t

	ventry	el1_sync			// Synchronous EL1h
	ventry	el1_irq				// IRQ EL1h
	ventry	el1_fiq_invalid			// FIQ EL1h
	ventry	el1_error_invalid		// Error EL1h

	ventry	el0_sync			// Synchronous 64-bit EL0
	ventry	el0_irq				// IRQ 64-bit EL0
	ventry	el0_fiq_invalid			// FIQ 64-bit EL0
	ventry	el0_error_invalid		// Error 64-bit EL0

#ifdef CONFIG_COMPAT
	ventry	el0_sync_compat			// Synchronous 32-bit EL0
	ventry	el0_irq_compat			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid_compat		// FIQ 32-bit EL0
	ventry	el0_error_invalid_compat	// Error 32-bit EL0
#else
	ventry	el0_sync_invalid		// Synchronous 32-bit EL0
	ventry	el0_irq_invalid			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid			// FIQ 32-bit EL0
	ventry	el0_error_invalid		// Error 32-bit EL0
#endif
END(vectors)
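
/*
 * VBAR_EL1 requires a 2KB-aligned base, hence the .align 11 above, and
 * the ventry macro pads each entry to 0x80 bytes. The four groups of four
 * entries cover, in order: current EL using SP_EL0 (EL1t), current EL
 * using SP_EL1 (EL1h), lower EL in AArch64, and lower EL in AArch32.
 */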

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, el, reason, regsize = 64
	kernel_entry \el, \regsize
	mov	x0, sp
	mov	x1, #\reason
	mrs	x2, esr_el1
	b	bad_mode
	.endm

el0_sync_invalid:
	inv_entry 0, BAD_SYNC
ENDPROC(el0_sync_invalid)

el0_irq_invalid:
	inv_entry 0, BAD_IRQ
ENDPROC(el0_irq_invalid)

el0_fiq_invalid:
	inv_entry 0, BAD_FIQ
ENDPROC(el0_fiq_invalid)

el0_error_invalid:
	inv_entry 0, BAD_ERROR
ENDPROC(el0_error_invalid)

#ifdef CONFIG_COMPAT
el0_fiq_invalid_compat:
	inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)

el0_error_invalid_compat:
	inv_entry 0, BAD_ERROR, 32
ENDPROC(el0_error_invalid_compat)
#endif

el1_sync_invalid:
	inv_entry 1, BAD_SYNC
ENDPROC(el1_sync_invalid)

el1_irq_invalid:
	inv_entry 1, BAD_IRQ
ENDPROC(el1_irq_invalid)

el1_fiq_invalid:
	inv_entry 1, BAD_FIQ
ENDPROC(el1_fiq_invalid)

el1_error_invalid:
	inv_entry 1, BAD_ERROR
ENDPROC(el1_error_invalid)

/*
 * EL1 mode handlers.
 */
	.align	6
el1_sync:
	kernel_entry 1
	mrs	x1, esr_el1			// read the syndrome register
	lsr	x24, x1, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_DABT_CUR	// data abort in EL1
	b.eq	el1_da
	cmp	x24, #ESR_ELx_EC_IABT_CUR	// instruction abort in EL1
	b.eq	el1_ia
	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
	b.eq	el1_undef
	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL1
	b.eq	el1_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_CUR	// debug exception in EL1
	b.ge	el1_dbg
	b	el1_inv
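
	/*
	 * The dispatch above switches on the exception class, ESR_EL1[31:26].
	 * "b.ge el1_dbg" relies on the debug-related classes (breakpoint,
	 * software step, watchpoint, BRK) occupying the top of the EC
	 * encoding space; anything else left unrecognised ends up at el1_inv.
	 */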
el1_ia:
	/*
	 * Fall through to the Data abort case
	 */
el1_da:
	/*
	 * Data abort handling
	 */
	mrs	x3, far_el1
	enable_dbg
	// re-enable interrupts if they were enabled in the aborted context
	tbnz	x23, #7, 1f			// PSR_I_BIT
	enable_irq
1:
	clear_address_tag x0, x3
	mov	x2, sp				// struct pt_regs
	bl	do_mem_abort

	// disable interrupts before pulling preserved data off the stack
	disable_irq
	kernel_exit 1
el1_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	enable_dbg
	mov	x2, sp
	b	do_sp_pc_abort
el1_undef:
	/*
	 * Undefined instruction
	 */
	enable_dbg
	mov	x0, sp
	b	do_undefinstr
el1_dbg:
	/*
	 * Debug exception handling
	 */
	cmp	x24, #ESR_ELx_EC_BRK64		// if BRK64
	cinc	x24, x24, eq			// set bit '0'
	tbz	x24, #0, el1_inv		// EL1 only
	mrs	x0, far_el1
	mov	x2, sp				// struct pt_regs
	bl	do_debug_exception
	kernel_exit 1
el1_inv:
	// TODO: add support for undefined instructions in kernel mode
	enable_dbg
	mov	x0, sp
	mov	x2, x1
	mov	x1, #BAD_SYNC
	b	bad_mode
ENDPROC(el1_sync)

	.align	6
el1_irq:
	kernel_entry 1
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	irq_handler

#ifdef CONFIG_PREEMPT
	ldr	w24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
	cbnz	w24, 1f				// preempt count != 0
	ldr	x0, [tsk, #TSK_TI_FLAGS]	// get flags
	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
	bl	el1_preempt
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	kernel_exit 1
ENDPROC(el1_irq)

#ifdef CONFIG_PREEMPT
el1_preempt:
	mov	x24, lr
1:	bl	preempt_schedule_irq		// irq en/disable is done inside
	ldr	x0, [tsk, #TSK_TI_FLAGS]	// get new task's TI_FLAGS
	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
	ret	x24
#endif

/*
 * EL0 mode handlers.
 */
	.align	6
el0_sync:
	kernel_entry 0
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_SVC64		// SVC in 64-bit state
	b.eq	el0_svc
	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_ELx_EC_FP_EXC64	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
	b.eq	el0_sys
	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv

#ifdef CONFIG_COMPAT
	.align	6
el0_sync_compat:
	kernel_entry 0, 32
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_SVC32		// SVC in 32-bit state
	b.eq	el0_svc_compat
	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_ELx_EC_FP_EXC32	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP15_32	// CP15 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP15_64	// CP15 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_MR	// CP14 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_LS	// CP14 LDC/STC trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_64	// CP14 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv
el0_svc_compat:
	/*
	 * AArch32 syscall handling
	 */
	adrp	stbl, compat_sys_call_table	// load compat syscall table pointer
	uxtw	scno, w7			// syscall number in w7 (r7)
	mov	sc_nr, #__NR_compat_syscalls
	b	el0_svc_naked

	.align	6
el0_irq_compat:
	kernel_entry 0, 32
	b	el0_irq_naked
#endif

el0_da:
	/*
	 * Data abort handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	clear_address_tag x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_mem_abort
	b	ret_to_user
el0_ia:
	/*
	 * Instruction abort handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_mem_abort
	b	ret_to_user
el0_fpsimd_acc:
	/*
	 * Floating Point or Advanced SIMD access
	 */
	enable_dbg
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_fpsimd_acc
	b	ret_to_user
el0_fpsimd_exc:
	/*
	 * Floating Point or Advanced SIMD exception
	 */
	enable_dbg
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_fpsimd_exc
	b	ret_to_user
el0_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_sp_pc_abort
	b	ret_to_user
el0_undef:
	/*
	 * Undefined instruction
	 */
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, sp
	bl	do_undefinstr
	b	ret_to_user
el0_sys:
	/*
	 * System instructions, for trapped cache maintenance instructions
	 */
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_sysinstr
	b	ret_to_user
el0_dbg:
	/*
	 * Debug exception handling
	 */
	tbnz	x24, #0, el0_inv		// EL0 only
	mrs	x0, far_el1
	mov	x1, x25
	mov	x2, sp
	bl	do_debug_exception
	enable_dbg
	ct_user_exit
	b	ret_to_user
el0_inv:
	enable_dbg
	ct_user_exit
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mov	x2, x25
	bl	bad_el0_sync
	b	ret_to_user
ENDPROC(el0_sync)

	.align	6
el0_irq:
	kernel_entry 0
el0_irq_naked:
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	ct_user_exit
	irq_handler

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	b	ret_to_user
ENDPROC(el0_irq)

/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 * x0 = previous task_struct (must be preserved across the switch)
 * x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 *
 */
ENTRY(cpu_switch_to)
	mov	x10, #THREAD_CPU_CONTEXT
	add	x8, x0, x10
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
	add	x8, x1, x10
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
	msr	sp_el0, x1
	ret
ENDPROC(cpu_switch_to)
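
/*
 * cpu_switch_to saves x19-x28, fp, sp and lr into the outgoing task's
 * thread.cpu_context (at offset THREAD_CPU_CONTEXT) and reloads the same
 * set from the incoming task, so execution resumes just after the incoming
 * task's own last call to cpu_switch_to. sp_el0 is repointed at the
 * incoming task_struct, which is what get_current() reads on arm64.
 */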

/*
 * This is the fast syscall return path.  We do as little as possible here,
 * and this includes saving x0 back into the kernel stack.
 */
ret_fast_syscall:
	disable_irq				// disable interrupts
	str	x0, [sp, #S_X0]			// returned x0
	ldr	x1, [tsk, #TSK_TI_FLAGS]	// re-check for syscall tracing
	and	x2, x1, #_TIF_SYSCALL_WORK
	cbnz	x2, ret_fast_syscall_trace
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
	enable_step_tsk x1, x2
	kernel_exit 0
ret_fast_syscall_trace:
	enable_irq				// enable interrupts
	b	__sys_trace_return_skipped	// we already saved x0

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
work_pending:
	mov	x0, sp				// 'regs'
	bl	do_notify_resume
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on		// enabled while in userspace
#endif
	ldr	x1, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
	b	finish_ret_to_user
/*
 * "slow" syscall return path.
 */
ret_to_user:
	disable_irq				// disable interrupts
	ldr	x1, [tsk, #TSK_TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
finish_ret_to_user:
	enable_step_tsk x1, x2
	kernel_exit 0
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	cbz	x19, 1f				// not a kernel thread
	mov	x0, x20
	blr	x19
1:	get_thread_info tsk
	b	ret_to_user
ENDPROC(ret_from_fork)
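
/*
 * For a kernel thread, copy_thread() stashes the thread function in
 * cpu_context.x19 and its argument in x20 (restored by cpu_switch_to), so
 * a non-zero x19 means "call the thread function"; for a user task x19 is
 * zero and we drop straight through to ret_to_user.
 */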

/*
 * SVC handler.
 */
	.align	6
el0_svc:
	adrp	stbl, sys_call_table		// load syscall table pointer
	uxtw	scno, w8			// syscall number in w8
	mov	sc_nr, #__NR_syscalls
el0_svc_naked:					// compat entry point
	stp	x0, scno, [sp, #S_ORIG_X0]	// save the original x0 and syscall number
	enable_dbg_and_irq
	ct_user_exit 1

	ldr	x16, [tsk, #TSK_TI_FLAGS]	// check for syscall hooks
	tst	x16, #_TIF_SYSCALL_WORK
	b.ne	__sys_trace
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	ni_sys
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	blr	x16				// call sys_* routine
	b	ret_fast_syscall
ni_sys:
	mov	x0, sp
	bl	do_ni_syscall
	b	ret_fast_syscall
ENDPROC(el0_svc)
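
/*
 * The syscall number is bounds-checked against sc_nr (__NR_syscalls) and
 * then used to index the table at stbl + scno * 8; sys_call_table itself
 * is page aligned, which is why a bare adrp suffices to load its address.
 */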

/*
 * This is the really slow path.  We're going to be doing context
 * switches, and waiting for our parent to respond.
 */
__sys_trace:
	mov	w0, #-1				// set default errno for
	cmp	scno, x0			// user-issued syscall(-1)
	b.ne	1f
	mov	x0, #-ENOSYS
	str	x0, [sp, #S_X0]
1:	mov	x0, sp
	bl	syscall_trace_enter
	cmp	w0, #-1				// skip the syscall?
	b.eq	__sys_trace_return_skipped
	uxtw	scno, w0			// syscall number (possibly new)
	mov	x1, sp				// pointer to regs
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	__ni_sys_trace
	ldp	x0, x1, [sp]			// restore the syscall args
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	blr	x16				// call sys_* routine

__sys_trace_return:
	str	x0, [sp, #S_X0]			// save returned x0
__sys_trace_return_skipped:
	mov	x0, sp
	bl	syscall_trace_exit
	b	ret_to_user

__ni_sys_trace:
	mov	x0, sp
	bl	do_ni_syscall
	b	__sys_trace_return

	.popsection				// .entry.text

/*
 * Special system call wrappers.
 */
ENTRY(sys_rt_sigreturn_wrapper)
	mov	x0, sp
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)