armv8_deprecated.c 15 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648
  1. /*
  2. * Copyright (C) 2014 ARM Limited
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License version 2 as
  6. * published by the Free Software Foundation.
  7. */
  8. #include <linux/cpu.h>
  9. #include <linux/init.h>
  10. #include <linux/list.h>
  11. #include <linux/perf_event.h>
  12. #include <linux/sched.h>
  13. #include <linux/slab.h>
  14. #include <linux/sysctl.h>
  15. #include <asm/cpufeature.h>
  16. #include <asm/insn.h>
  17. #include <asm/opcodes.h>
  18. #include <asm/sysreg.h>
  19. #include <asm/system_misc.h>
  20. #include <asm/traps.h>
  21. #include <asm/uaccess.h>
  22. #include <asm/cpufeature.h>
  23. #define CREATE_TRACE_POINTS
  24. #include "trace-events-emulation.h"
  25. /*
  26. * The runtime support for deprecated instruction support can be in one of
  27. * following three states -
  28. *
  29. * 0 = undef
  30. * 1 = emulate (software emulation)
  31. * 2 = hw (supported in hardware)
  32. */
/*
 * Runtime state of one deprecated/obsolete instruction: faults (undef),
 * is emulated in software, or is enabled in hardware.  Values double as
 * the sysctl integer written to /proc/sys/abi/<insn>.
 */
enum insn_emulation_mode {
	INSN_UNDEF,
	INSN_EMULATE,
	INSN_HW,
};

/* Stage of the architectural deprecation process the instruction is in. */
enum legacy_insn_status {
	INSN_DEPRECATED,
	INSN_OBSOLETE,
};
/* Static description of one emulated instruction: its undef hooks and an
 * optional callback to switch native hardware support on/off per CPU. */
struct insn_emulation_ops {
	const char *name;
	enum legacy_insn_status status;
	struct undef_hook *hooks;	/* terminated by an entry with instr_mask == 0 */
	int (*set_hw_mode)(bool enable);
};

/* Runtime bookkeeping: list node plus the current mode and the [min, max]
 * range enforced by the sysctl handler. */
struct insn_emulation {
	struct list_head node;
	struct insn_emulation_ops *ops;
	int current_mode;	/* enum insn_emulation_mode */
	int min;		/* lower sysctl bound (extra1) */
	int max;		/* upper sysctl bound (extra2) */
};

/* All registered emulations; list and counter protected by the lock below. */
static LIST_HEAD(insn_emulation);
static int nr_insn_emulated __initdata;
static DEFINE_RAW_SPINLOCK(insn_emulation_lock);
  58. static void register_emulation_hooks(struct insn_emulation_ops *ops)
  59. {
  60. struct undef_hook *hook;
  61. BUG_ON(!ops->hooks);
  62. for (hook = ops->hooks; hook->instr_mask; hook++)
  63. register_undef_hook(hook);
  64. pr_notice("Registered %s emulation handler\n", ops->name);
  65. }
  66. static void remove_emulation_hooks(struct insn_emulation_ops *ops)
  67. {
  68. struct undef_hook *hook;
  69. BUG_ON(!ops->hooks);
  70. for (hook = ops->hooks; hook->instr_mask; hook++)
  71. unregister_undef_hook(hook);
  72. pr_notice("Removed %s emulation handler\n", ops->name);
  73. }
  74. static void enable_insn_hw_mode(void *data)
  75. {
  76. struct insn_emulation *insn = (struct insn_emulation *)data;
  77. if (insn->ops->set_hw_mode)
  78. insn->ops->set_hw_mode(true);
  79. }
  80. static void disable_insn_hw_mode(void *data)
  81. {
  82. struct insn_emulation *insn = (struct insn_emulation *)data;
  83. if (insn->ops->set_hw_mode)
  84. insn->ops->set_hw_mode(false);
  85. }
  86. /* Run set_hw_mode(mode) on all active CPUs */
  87. static int run_all_cpu_set_hw_mode(struct insn_emulation *insn, bool enable)
  88. {
  89. if (!insn->ops->set_hw_mode)
  90. return -EINVAL;
  91. if (enable)
  92. on_each_cpu(enable_insn_hw_mode, (void *)insn, true);
  93. else
  94. on_each_cpu(disable_insn_hw_mode, (void *)insn, true);
  95. return 0;
  96. }
  97. /*
  98. * Run set_hw_mode for all insns on a starting CPU.
  99. * Returns:
  100. * 0 - If all the hooks ran successfully.
  101. * -EINVAL - At least one hook is not supported by the CPU.
  102. */
  103. static int run_all_insn_set_hw_mode(unsigned int cpu)
  104. {
  105. int rc = 0;
  106. unsigned long flags;
  107. struct insn_emulation *insn;
  108. raw_spin_lock_irqsave(&insn_emulation_lock, flags);
  109. list_for_each_entry(insn, &insn_emulation, node) {
  110. bool enable = (insn->current_mode == INSN_HW);
  111. if (insn->ops->set_hw_mode && insn->ops->set_hw_mode(enable)) {
  112. pr_warn("CPU[%u] cannot support the emulation of %s",
  113. cpu, insn->ops->name);
  114. rc = -EINVAL;
  115. }
  116. }
  117. raw_spin_unlock_irqrestore(&insn_emulation_lock, flags);
  118. return rc;
  119. }
/*
 * Transition one instruction from @prev to insn->current_mode: first undo
 * what the previous mode had installed, then set up the new mode.
 * Returns 0 on success or the error from enabling hardware mode; callers
 * revert current_mode themselves when this fails.
 */
static int update_insn_emulation_mode(struct insn_emulation *insn,
				      enum insn_emulation_mode prev)
{
	int ret = 0;

	/* Step 1: tear down whatever @prev had set up. */
	switch (prev) {
	case INSN_UNDEF: /* Nothing to be done */
		break;
	case INSN_EMULATE:
		remove_emulation_hooks(insn->ops);
		break;
	case INSN_HW:
		if (!run_all_cpu_set_hw_mode(insn, false))
			pr_notice("Disabled %s support\n", insn->ops->name);
		break;
	}

	/* Step 2: bring up the new mode. */
	switch (insn->current_mode) {
	case INSN_UNDEF:
		break;
	case INSN_EMULATE:
		register_emulation_hooks(insn->ops);
		break;
	case INSN_HW:
		/* May fail if a CPU cannot honour set_hw_mode(). */
		ret = run_all_cpu_set_hw_mode(insn, true);
		if (!ret)
			pr_notice("Enabled %s support\n", insn->ops->name);
		break;
	}

	return ret;
}
  149. static void __init register_insn_emulation(struct insn_emulation_ops *ops)
  150. {
  151. unsigned long flags;
  152. struct insn_emulation *insn;
  153. insn = kzalloc(sizeof(*insn), GFP_KERNEL);
  154. insn->ops = ops;
  155. insn->min = INSN_UNDEF;
  156. switch (ops->status) {
  157. case INSN_DEPRECATED:
  158. insn->current_mode = INSN_EMULATE;
  159. /* Disable the HW mode if it was turned on at early boot time */
  160. run_all_cpu_set_hw_mode(insn, false);
  161. insn->max = INSN_HW;
  162. break;
  163. case INSN_OBSOLETE:
  164. insn->current_mode = INSN_UNDEF;
  165. insn->max = INSN_EMULATE;
  166. break;
  167. }
  168. raw_spin_lock_irqsave(&insn_emulation_lock, flags);
  169. list_add(&insn->node, &insn_emulation);
  170. nr_insn_emulated++;
  171. raw_spin_unlock_irqrestore(&insn_emulation_lock, flags);
  172. /* Register any handlers if required */
  173. update_insn_emulation_mode(insn, INSN_UNDEF);
  174. }
  175. static int emulation_proc_handler(struct ctl_table *table, int write,
  176. void __user *buffer, size_t *lenp,
  177. loff_t *ppos)
  178. {
  179. int ret = 0;
  180. struct insn_emulation *insn = (struct insn_emulation *) table->data;
  181. enum insn_emulation_mode prev_mode = insn->current_mode;
  182. table->data = &insn->current_mode;
  183. ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
  184. if (ret || !write || prev_mode == insn->current_mode)
  185. goto ret;
  186. ret = update_insn_emulation_mode(insn, prev_mode);
  187. if (ret) {
  188. /* Mode change failed, revert to previous mode. */
  189. insn->current_mode = prev_mode;
  190. update_insn_emulation_mode(insn, INSN_UNDEF);
  191. }
  192. ret:
  193. table->data = insn;
  194. return ret;
  195. }
/* The /proc/sys/abi directory; per-instruction entries are attached to it
 * as children by register_insn_emulation_sysctl(). */
static struct ctl_table ctl_abi[] = {
	{
		.procname = "abi",
		.mode = 0555,
	},
	{ }
};
  203. static void __init register_insn_emulation_sysctl(struct ctl_table *table)
  204. {
  205. unsigned long flags;
  206. int i = 0;
  207. struct insn_emulation *insn;
  208. struct ctl_table *insns_sysctl, *sysctl;
  209. insns_sysctl = kzalloc(sizeof(*sysctl) * (nr_insn_emulated + 1),
  210. GFP_KERNEL);
  211. raw_spin_lock_irqsave(&insn_emulation_lock, flags);
  212. list_for_each_entry(insn, &insn_emulation, node) {
  213. sysctl = &insns_sysctl[i];
  214. sysctl->mode = 0644;
  215. sysctl->maxlen = sizeof(int);
  216. sysctl->procname = insn->ops->name;
  217. sysctl->data = insn;
  218. sysctl->extra1 = &insn->min;
  219. sysctl->extra2 = &insn->max;
  220. sysctl->proc_handler = emulation_proc_handler;
  221. i++;
  222. }
  223. raw_spin_unlock_irqrestore(&insn_emulation_lock, flags);
  224. table->child = insns_sysctl;
  225. register_sysctl_table(table);
  226. }
  227. /*
  228. * Implement emulation of the SWP/SWPB instructions using load-exclusive and
  229. * store-exclusive.
  230. *
  231. * Syntax of SWP{B} instruction: SWP{B}<c> <Rt>, <Rt2>, [<Rn>]
  232. * Where: Rt = destination
  233. * Rt2 = source
  234. * Rn = address
  235. */
/*
 * Error-checking SWP macros implemented using ldxr{b}/stxr{b}
 */

/* Arbitrary constant to ensure forward-progress of the LL/SC loop */
#define __SWP_LL_SC_LOOPS	4

/*
 * Atomically swap user memory at @addr with @data using an exclusive
 * load/store pair, retrying the LL/SC sequence up to __SWP_LL_SC_LOOPS
 * times.  On success the old memory value is left in @data and @res is 0;
 * loop exhaustion yields -EAGAIN, and a faulting user access lands in the
 * .fixup entry which sets -EFAULT.  @B selects the byte variant ("b") or
 * the word variant ("").  @temp/@temp2 are scratch registers.
 */
#define __user_swpX_asm(data, addr, res, temp, temp2, B) \
do { \
	uaccess_enable(); \
	__asm__ __volatile__( \
	" mov %w3, %w7\n" \
	"0: ldxr"B" %w2, [%4]\n" \
	"1: stxr"B" %w0, %w1, [%4]\n" \
	" cbz %w0, 2f\n" \
	" sub %w3, %w3, #1\n" \
	" cbnz %w3, 0b\n" \
	" mov %w0, %w5\n" \
	" b 3f\n" \
	"2:\n" \
	" mov %w1, %w2\n" \
	"3:\n" \
	" .pushsection .fixup,\"ax\"\n" \
	" .align 2\n" \
	"4: mov %w0, %w6\n" \
	" b 3b\n" \
	" .popsection" \
	_ASM_EXTABLE(0b, 4b) \
	_ASM_EXTABLE(1b, 4b) \
	: "=&r" (res), "+r" (data), "=&r" (temp), "=&r" (temp2) \
	: "r" ((unsigned long)addr), "i" (-EAGAIN), \
	  "i" (-EFAULT), \
	  "i" (__SWP_LL_SC_LOOPS) \
	: "memory"); \
	uaccess_disable(); \
} while (0)

/* Word-sized SWP. */
#define __user_swp_asm(data, addr, res, temp, temp2) \
	__user_swpX_asm(data, addr, res, temp, temp2, "")
/* Byte-sized SWPB. */
#define __user_swpb_asm(data, addr, res, temp, temp2) \
	__user_swpX_asm(data, addr, res, temp, temp2, "b")
  274. /*
  275. * Bit 22 of the instruction encoding distinguishes between
  276. * the SWP and SWPB variants (bit set means SWPB).
  277. */
  278. #define TYPE_SWPB (1 << 22)
  279. static int emulate_swpX(unsigned int address, unsigned int *data,
  280. unsigned int type)
  281. {
  282. unsigned int res = 0;
  283. if ((type != TYPE_SWPB) && (address & 0x3)) {
  284. /* SWP to unaligned address not permitted */
  285. pr_debug("SWP instruction on unaligned pointer!\n");
  286. return -EFAULT;
  287. }
  288. while (1) {
  289. unsigned long temp, temp2;
  290. if (type == TYPE_SWPB)
  291. __user_swpb_asm(*data, address, res, temp, temp2);
  292. else
  293. __user_swp_asm(*data, address, res, temp, temp2);
  294. if (likely(res != -EAGAIN) || signal_pending(current))
  295. break;
  296. cond_resched();
  297. }
  298. return res;
  299. }
  300. #define ARM_OPCODE_CONDITION_UNCOND 0xf
  301. static unsigned int __kprobes aarch32_check_condition(u32 opcode, u32 psr)
  302. {
  303. u32 cc_bits = opcode >> 28;
  304. if (cc_bits != ARM_OPCODE_CONDITION_UNCOND) {
  305. if ((*aarch32_opcode_cond_checks[cc_bits])(psr))
  306. return ARM_OPCODE_CONDTEST_PASS;
  307. else
  308. return ARM_OPCODE_CONDTEST_FAIL;
  309. }
  310. return ARM_OPCODE_CONDTEST_UNCOND;
  311. }
/*
 * swp_handler logs the id of calling process, dissects the instruction, sanity
 * checks the memory location, calls emulate_swpX for the actual operation and
 * deals with fixup/error handling before returning
 */
static int swp_handler(struct pt_regs *regs, u32 instr)
{
	u32 destreg, data, type, address = 0;
	int rn, rt2, res = 0;

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc);

	type = instr & TYPE_SWPB;

	switch (aarch32_check_condition(instr, regs->pstate)) {
	case ARM_OPCODE_CONDTEST_PASS:
		break;
	case ARM_OPCODE_CONDTEST_FAIL:
		/* Condition failed - return to next instruction */
		goto ret;
	case ARM_OPCODE_CONDTEST_UNCOND:
		/* If unconditional encoding - not a SWP, undef */
		return -EFAULT;
	default:
		return -EINVAL;
	}

	/* Decode Rn (address), Rt2 (source) and Rt (destination). */
	rn = aarch32_insn_extract_reg_num(instr, A32_RN_OFFSET);
	rt2 = aarch32_insn_extract_reg_num(instr, A32_RT2_OFFSET);

	address = (u32)regs->user_regs.regs[rn];
	data = (u32)regs->user_regs.regs[rt2];
	destreg = aarch32_insn_extract_reg_num(instr, A32_RT_OFFSET);

	pr_debug("addr in r%d->0x%08x, dest is r%d, source in r%d->0x%08x)\n",
		rn, address, destreg,
		aarch32_insn_extract_reg_num(instr, A32_RT2_OFFSET), data);

	/* Check access in reasonable access range for both SWP and SWPB */
	if (!access_ok(VERIFY_WRITE, (address & ~3), 4)) {
		pr_debug("SWP{B} emulation: access to 0x%08x not allowed!\n",
			address);
		goto fault;
	}

	res = emulate_swpX(address, &data, type);
	if (res == -EFAULT)
		goto fault;
	else if (res == 0)
		/* Swap succeeded: write the old memory value back to Rt. */
		regs->user_regs.regs[destreg] = data;

ret:
	if (type == TYPE_SWPB)
		trace_instruction_emulation("swpb", regs->pc);
	else
		trace_instruction_emulation("swp", regs->pc);
	/* NOTE(review): %ld paired with an unsigned long cast — works in
	 * practice for pids, but %lu would match the cast; confirm intent. */
	pr_warn_ratelimited("\"%s\" (%ld) uses obsolete SWP{B} instruction at 0x%llx\n",
			current->comm, (unsigned long)current->pid, regs->pc);

	/* Advance past the emulated 4-byte A32 instruction. */
	regs->pc += 4;
	return 0;

fault:
	pr_debug("SWP{B} emulation: access caused memory abort!\n");
	arm64_notify_segfault(regs, address);

	return 0;
}
  368. /*
  369. * Only emulate SWP/SWPB executed in ARM state/User mode.
  370. * The kernel must be SWP free and SWP{B} does not exist in Thumb.
  371. */
/* Match the A32 SWP/SWPB encoding in user mode only; see comment above. */
static struct undef_hook swp_hooks[] = {
	{
		.instr_mask	= 0x0fb00ff0,
		.instr_val	= 0x01000090,
		.pstate_mask	= COMPAT_PSR_MODE_MASK,
		.pstate_val	= COMPAT_PSR_MODE_USR,
		.fn		= swp_handler
	},
	{ }	/* sentinel */
};

/* SWP{B} is obsolete: defaults to undef, no hardware mode exists. */
static struct insn_emulation_ops swp_ops = {
	.name = "swp",
	.status = INSN_OBSOLETE,
	.hooks = swp_hooks,
	.set_hw_mode = NULL,
};
/*
 * Emulate the deprecated CP15 barrier instructions (mcr p15, 0, Rt, c7, ...)
 * by issuing the equivalent native barrier, then step past the 4-byte
 * instruction.
 */
static int cp15barrier_handler(struct pt_regs *regs, u32 instr)
{
	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc);

	switch (aarch32_check_condition(instr, regs->pstate)) {
	case ARM_OPCODE_CONDTEST_PASS:
		break;
	case ARM_OPCODE_CONDTEST_FAIL:
		/* Condition failed - return to next instruction */
		goto ret;
	case ARM_OPCODE_CONDTEST_UNCOND:
		/* If unconditional encoding - not a barrier instruction */
		return -EFAULT;
	default:
		return -EINVAL;
	}

	/* CRm distinguishes the barrier flavour; opc2 splits dmb from dsb. */
	switch (aarch32_insn_mcr_extract_crm(instr)) {
	case 10:
		/*
		 * dmb - mcr p15, 0, Rt, c7, c10, 5
		 * dsb - mcr p15, 0, Rt, c7, c10, 4
		 */
		if (aarch32_insn_mcr_extract_opc2(instr) == 5) {
			dmb(sy);
			trace_instruction_emulation(
				"mcr p15, 0, Rt, c7, c10, 5 ; dmb", regs->pc);
		} else {
			dsb(sy);
			trace_instruction_emulation(
				"mcr p15, 0, Rt, c7, c10, 4 ; dsb", regs->pc);
		}
		break;
	case 5:
		/*
		 * isb - mcr p15, 0, Rt, c7, c5, 4
		 *
		 * Taking an exception or returning from one acts as an
		 * instruction barrier. So no explicit barrier needed here.
		 */
		trace_instruction_emulation(
			"mcr p15, 0, Rt, c7, c5, 4 ; isb", regs->pc);
		break;
	}

ret:
	pr_warn_ratelimited("\"%s\" (%ld) uses deprecated CP15 Barrier instruction at 0x%llx\n",
			current->comm, (unsigned long)current->pid, regs->pc);

	/* Skip the emulated 4-byte instruction. */
	regs->pc += 4;
	return 0;
}
  436. static int cp15_barrier_set_hw_mode(bool enable)
  437. {
  438. if (enable)
  439. config_sctlr_el1(0, SCTLR_EL1_CP15BEN);
  440. else
  441. config_sctlr_el1(SCTLR_EL1_CP15BEN, 0);
  442. return 0;
  443. }
/* User-mode A32 encodings of the CP15 barriers.  NOTE(review): first entry
 * covers the c7,c10 dmb/dsb encodings, second the c7,c5 isb encoding —
 * verify masks against the ARM ARM if touching them. */
static struct undef_hook cp15_barrier_hooks[] = {
	{
		.instr_mask	= 0x0fff0fdf,
		.instr_val	= 0x0e070f9a,
		.pstate_mask	= COMPAT_PSR_MODE_MASK,
		.pstate_val	= COMPAT_PSR_MODE_USR,
		.fn		= cp15barrier_handler,
	},
	{
		.instr_mask	= 0x0fff0fff,
		.instr_val	= 0x0e070f95,
		.pstate_mask	= COMPAT_PSR_MODE_MASK,
		.pstate_val	= COMPAT_PSR_MODE_USR,
		.fn		= cp15barrier_handler,
	},
	{ }	/* sentinel */
};

/* Deprecated (not obsolete): default is software emulation, hardware
 * support can be re-enabled through set_hw_mode. */
static struct insn_emulation_ops cp15_barrier_ops = {
	.name = "cp15_barrier",
	.status = INSN_DEPRECATED,
	.hooks = cp15_barrier_hooks,
	.set_hw_mode = cp15_barrier_set_hw_mode,
};
  467. static int setend_set_hw_mode(bool enable)
  468. {
  469. if (!cpu_supports_mixed_endian_el0())
  470. return -EINVAL;
  471. if (enable)
  472. config_sctlr_el1(SCTLR_EL1_SED, 0);
  473. else
  474. config_sctlr_el1(0, SCTLR_EL1_SED);
  475. return 0;
  476. }
  477. static int compat_setend_handler(struct pt_regs *regs, u32 big_endian)
  478. {
  479. char *insn;
  480. perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc);
  481. if (big_endian) {
  482. insn = "setend be";
  483. regs->pstate |= COMPAT_PSR_E_BIT;
  484. } else {
  485. insn = "setend le";
  486. regs->pstate &= ~COMPAT_PSR_E_BIT;
  487. }
  488. trace_instruction_emulation(insn, regs->pc);
  489. pr_warn_ratelimited("\"%s\" (%ld) uses deprecated setend instruction at 0x%llx\n",
  490. current->comm, (unsigned long)current->pid, regs->pc);
  491. return 0;
  492. }
  493. static int a32_setend_handler(struct pt_regs *regs, u32 instr)
  494. {
  495. int rc = compat_setend_handler(regs, (instr >> 9) & 1);
  496. regs->pc += 4;
  497. return rc;
  498. }
  499. static int t16_setend_handler(struct pt_regs *regs, u32 instr)
  500. {
  501. int rc = compat_setend_handler(regs, (instr >> 3) & 1);
  502. regs->pc += 2;
  503. return rc;
  504. }
/* SETEND encodings in user mode: A32 variant, then the Thumb (T16) variant
 * which additionally requires the T bit in PSTATE. */
static struct undef_hook setend_hooks[] = {
	{
		.instr_mask	= 0xfffffdff,
		.instr_val	= 0xf1010000,
		.pstate_mask	= COMPAT_PSR_MODE_MASK,
		.pstate_val	= COMPAT_PSR_MODE_USR,
		.fn		= a32_setend_handler,
	},
	{
		/* Thumb mode */
		.instr_mask	= 0x0000fff7,
		.instr_val	= 0x0000b650,
		.pstate_mask	= (COMPAT_PSR_T_BIT | COMPAT_PSR_MODE_MASK),
		.pstate_val	= (COMPAT_PSR_T_BIT | COMPAT_PSR_MODE_USR),
		.fn		= t16_setend_handler,
	},
	{}	/* sentinel */
};

/* Deprecated: emulated by default, hardware mode available where the CPU
 * supports mixed-endian EL0. */
static struct insn_emulation_ops setend_ops = {
	.name = "setend",
	.status = INSN_DEPRECATED,
	.hooks = setend_hooks,
	.set_hw_mode = setend_set_hw_mode,
};
  529. /*
  530. * Invoked as late_initcall, since not needed before init spawned.
  531. */
  532. static int __init armv8_deprecated_init(void)
  533. {
  534. if (IS_ENABLED(CONFIG_SWP_EMULATION))
  535. register_insn_emulation(&swp_ops);
  536. if (IS_ENABLED(CONFIG_CP15_BARRIER_EMULATION))
  537. register_insn_emulation(&cp15_barrier_ops);
  538. if (IS_ENABLED(CONFIG_SETEND_EMULATION)) {
  539. if(system_supports_mixed_endian_el0())
  540. register_insn_emulation(&setend_ops);
  541. else
  542. pr_info("setend instruction emulation is not supported on the system");
  543. }
  544. cpuhp_setup_state_nocalls(CPUHP_AP_ARM64_ISNDEP_STARTING,
  545. "AP_ARM64_ISNDEP_STARTING",
  546. run_all_insn_set_hw_mode, NULL);
  547. register_insn_emulation_sysctl(ctl_abi);
  548. return 0;
  549. }
  550. late_initcall(armv8_deprecated_init);