/* pacache.S */
  1. /*
  2. * PARISC TLB and cache flushing support
  3. * Copyright (C) 2000-2001 Hewlett-Packard (John Marvin)
  4. * Copyright (C) 2001 Matthew Wilcox (willy at parisc-linux.org)
  5. * Copyright (C) 2002 Richard Hirst (rhirst with parisc-linux.org)
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License as published by
  9. * the Free Software Foundation; either version 2, or (at your option)
  10. * any later version.
  11. *
  12. * This program is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15. * GNU General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU General Public License
  18. * along with this program; if not, write to the Free Software
  19. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  20. */
  21. /*
  22. * NOTE: fdc,fic, and pdc instructions that use base register modification
  23. * should only use index and base registers that are not shadowed,
  24. * so that the fast path emulation in the non access miss handler
  25. * can be used.
  26. */
  27. #ifdef CONFIG_64BIT
  28. .level 2.0w
  29. #else
  30. .level 2.0
  31. #endif
  32. #include <asm/psw.h>
  33. #include <asm/assembly.h>
  34. #include <asm/pgtable.h>
  35. #include <asm/cache.h>
  36. #include <linux/linkage.h>
  37. .text
  38. .align 128
ENTRY_CFI(flush_tlb_all_local)
	.proc
	.callinfo NO_CALLS
	.entry

	/*
	 * flush_tlb_all_local(void) - purge the entire local I-TLB and D-TLB.
	 *
	 * The pitlbe and pdtlbe instructions should only be used to
	 * flush the entire tlb. Also, there needs to be no intervening
	 * tlb operations, e.g. tlb misses, so the operation needs
	 * to happen in real mode with all interruptions disabled.
	 *
	 * Loop geometry (space base/stride/count and offset base/stride/
	 * count) comes from the PDC-populated cache_info structure.
	 * %r19 carries the caller's PSW I-bit across the real-mode section.
	 */

	/* pcxt_ssm_bug	- relied upon translation! PA 2.0 Arch. F-4 and F-5 */
	rsm		PSW_SM_I, %r19		/* save I-bit state */
	load32		PA(1f), %r1
	nop					/* nops pad the rsm window */
	nop					/* (pcxt_ssm_bug workaround) */
	nop
	nop
	nop

	rsm		PSW_SM_Q, %r0		/* prep to load iia queue */
	mtctl		%r0, %cr17		/* Clear IIASQ tail */
	mtctl		%r0, %cr17		/* Clear IIASQ head */
	mtctl		%r1, %cr18		/* IIAOQ head */
	ldo		4(%r1), %r1
	mtctl		%r1, %cr18		/* IIAOQ tail */
	load32		REAL_MODE_PSW, %r1
	mtctl		%r1, %ipsw
	rfi					/* resume at 1: in real mode */
	nop

1:	load32		PA(cache_info), %r1

	/* Flush Instruction Tlb */

	LDREG		ITLB_SID_BASE(%r1), %r20
	LDREG		ITLB_SID_STRIDE(%r1), %r21
	LDREG		ITLB_SID_COUNT(%r1), %r22
	LDREG		ITLB_OFF_BASE(%r1), %arg0
	LDREG		ITLB_OFF_STRIDE(%r1), %arg1
	LDREG		ITLB_OFF_COUNT(%r1), %arg2
	LDREG		ITLB_LOOP(%r1), %arg3

	addib,COND(=)	-1, %arg3, fitoneloop	/* Preadjust and test */
	movb,<,n	%arg3, %r31, fitdone	/* If loop < 0, skip */
	copy		%arg0, %r28		/* Init base addr */

fitmanyloop:					/* Loop if LOOP >= 2 */
	mtsp		%r20, %sr1
	add		%r21, %r20, %r20	/* increment space */
	copy		%arg2, %r29		/* Init middle loop count */

fitmanymiddle:					/* Loop if LOOP >= 2 */
	addib,COND(>)	-1, %r31, fitmanymiddle	/* Adjusted inner loop decr */
	pitlbe		%r0(%sr1, %r28)
	pitlbe,m	%arg1(%sr1, %r28)	/* Last pitlbe and addr adjust */
	addib,COND(>)	-1, %r29, fitmanymiddle	/* Middle loop decr */
	copy		%arg3, %r31		/* Re-init inner loop count */

	movb,tr		%arg0, %r28, fitmanyloop /* Re-init base addr */
	addib,COND(<=),n -1, %r22, fitdone	/* Outer loop count decr */

fitoneloop:					/* Loop if LOOP = 1 */
	mtsp		%r20, %sr1
	copy		%arg0, %r28		/* init base addr */
	copy		%arg2, %r29		/* init middle loop count */

fitonemiddle:					/* Loop if LOOP = 1 */
	addib,COND(>)	-1, %r29, fitonemiddle	/* Middle loop count decr */
	pitlbe,m	%arg1(%sr1, %r28)	/* pitlbe for one loop */

	addib,COND(>)	-1, %r22, fitoneloop	/* Outer loop count decr */
	add		%r21, %r20, %r20	/* increment space */

fitdone:

	/* Flush Data Tlb */

	LDREG		DTLB_SID_BASE(%r1), %r20
	LDREG		DTLB_SID_STRIDE(%r1), %r21
	LDREG		DTLB_SID_COUNT(%r1), %r22
	LDREG		DTLB_OFF_BASE(%r1), %arg0
	LDREG		DTLB_OFF_STRIDE(%r1), %arg1
	LDREG		DTLB_OFF_COUNT(%r1), %arg2
	LDREG		DTLB_LOOP(%r1), %arg3

	addib,COND(=)	-1, %arg3, fdtoneloop	/* Preadjust and test */
	movb,<,n	%arg3, %r31, fdtdone	/* If loop < 0, skip */
	copy		%arg0, %r28		/* Init base addr */

fdtmanyloop:					/* Loop if LOOP >= 2 */
	mtsp		%r20, %sr1
	add		%r21, %r20, %r20	/* increment space */
	copy		%arg2, %r29		/* Init middle loop count */

fdtmanymiddle:					/* Loop if LOOP >= 2 */
	addib,COND(>)	-1, %r31, fdtmanymiddle	/* Adjusted inner loop decr */
	pdtlbe		%r0(%sr1, %r28)
	pdtlbe,m	%arg1(%sr1, %r28)	/* Last pdtlbe and addr adjust */
	addib,COND(>)	-1, %r29, fdtmanymiddle	/* Middle loop decr */
	copy		%arg3, %r31		/* Re-init inner loop count */

	movb,tr		%arg0, %r28, fdtmanyloop /* Re-init base addr */
	addib,COND(<=),n -1, %r22,fdtdone	/* Outer loop count decr */

fdtoneloop:					/* Loop if LOOP = 1 */
	mtsp		%r20, %sr1
	copy		%arg0, %r28		/* init base addr */
	copy		%arg2, %r29		/* init middle loop count */

fdtonemiddle:					/* Loop if LOOP = 1 */
	addib,COND(>)	-1, %r29, fdtonemiddle	/* Middle loop count decr */
	pdtlbe,m	%arg1(%sr1, %r28)	/* pdtlbe for one loop */

	addib,COND(>)	-1, %r22, fdtoneloop	/* Outer loop count decr */
	add		%r21, %r20, %r20	/* increment space */

fdtdone:
	/*
	 * Switch back to virtual mode
	 */
	/* pcxt_ssm_bug */
	rsm		PSW_SM_I, %r0
	load32		2f, %r1
	nop					/* pcxt_ssm_bug padding, as above */
	nop
	nop
	nop
	nop

	rsm		PSW_SM_Q, %r0		/* prep to load iia queue */
	mtctl		%r0, %cr17		/* Clear IIASQ tail */
	mtctl		%r0, %cr17		/* Clear IIASQ head */
	mtctl		%r1, %cr18		/* IIAOQ head */
	ldo		4(%r1), %r1
	mtctl		%r1, %cr18		/* IIAOQ tail */
	load32		KERNEL_PSW, %r1
	or		%r1, %r19, %r1		/* I-bit to state on entry */
	mtctl		%r1, %ipsw		/* restore I-bit (entire PSW) */
	rfi
	nop

2:	bv		%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(flush_tlb_all_local)
  161. .import cache_info,data
ENTRY_CFI(flush_instruction_cache_local)
	.proc
	.callinfo NO_CALLS
	.entry

	/*
	 * flush_instruction_cache_local(void) - flush the entire local
	 * I-cache with fice, using base/stride/count/loop parameters
	 * from cache_info.  Interrupts are disabled across the loop;
	 * %r22 holds the saved I-bit, restored via mtsm at fisync.
	 */
	load32		cache_info, %r1

	/* Flush Instruction Cache */

	LDREG		ICACHE_BASE(%r1), %arg0
	LDREG		ICACHE_STRIDE(%r1), %arg1
	LDREG		ICACHE_COUNT(%r1), %arg2
	LDREG		ICACHE_LOOP(%r1), %arg3
	rsm		PSW_SM_I, %r22		/* No mmgt ops during loop*/
	mtsp		%r0, %sr1
	addib,COND(=)	-1, %arg3, fioneloop	/* Preadjust and test */
	movb,<,n	%arg3, %r31, fisync	/* If loop < 0, do sync */

fimanyloop:					/* Loop if LOOP >= 2 */
	addib,COND(>)	-1, %r31, fimanyloop	/* Adjusted inner loop decr */
	fice		%r0(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)	/* Last fice and addr adjust */
	movb,tr		%arg3, %r31, fimanyloop	/* Re-init inner loop count */
	addib,COND(<=),n -1, %arg2, fisync	/* Outer loop decr */

fioneloop:					/* Loop if LOOP = 1 */
	/* Some implementations may flush with a single fice instruction */
	cmpib,COND(>>=),n 15, %arg2, fioneloop2	/* small count: simple loop */

fioneloop1:
	/* 16 fice per iteration: 15 here plus one in the addib delay slot */
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	addib,COND(>)	-16, %arg2, fioneloop1
	fice,m		%arg1(%sr1, %arg0)

	/* Check if done */
	cmpb,COND(=),n	%arg2, %r0, fisync	/* Predict branch taken */

fioneloop2:
	addib,COND(>)	-1, %arg2, fioneloop2	/* Outer loop count decr */
	fice,m		%arg1(%sr1, %arg0)	/* Fice for one loop */

fisync:
	sync
	mtsm		%r22			/* restore I-bit */
	bv		%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(flush_instruction_cache_local)
  216. .import cache_info, data
ENTRY_CFI(flush_data_cache_local)
	.proc
	.callinfo NO_CALLS
	.entry

	/*
	 * flush_data_cache_local(void) - flush the entire local D-cache
	 * with fdce, using base/stride/count/loop parameters from
	 * cache_info.  Same structure as flush_instruction_cache_local;
	 * finishes with syncdma + sync.  %r22 holds the saved I-bit.
	 */
	load32		cache_info, %r1

	/* Flush Data Cache */

	LDREG		DCACHE_BASE(%r1), %arg0
	LDREG		DCACHE_STRIDE(%r1), %arg1
	LDREG		DCACHE_COUNT(%r1), %arg2
	LDREG		DCACHE_LOOP(%r1), %arg3
	rsm		PSW_SM_I, %r22		/* No mmgt ops during loop*/
	mtsp		%r0, %sr1
	addib,COND(=)	-1, %arg3, fdoneloop	/* Preadjust and test */
	movb,<,n	%arg3, %r31, fdsync	/* If loop < 0, do sync */

fdmanyloop:					/* Loop if LOOP >= 2 */
	addib,COND(>)	-1, %r31, fdmanyloop	/* Adjusted inner loop decr */
	fdce		%r0(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)	/* Last fdce and addr adjust */
	movb,tr		%arg3, %r31, fdmanyloop	/* Re-init inner loop count */
	addib,COND(<=),n -1, %arg2, fdsync	/* Outer loop decr */

fdoneloop:					/* Loop if LOOP = 1 */
	/* Some implementations may flush with a single fdce instruction */
	cmpib,COND(>>=),n 15, %arg2, fdoneloop2	/* small count: simple loop */

fdoneloop1:
	/* 16 fdce per iteration: 15 here plus one in the addib delay slot */
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	addib,COND(>)	-16, %arg2, fdoneloop1
	fdce,m		%arg1(%sr1, %arg0)

	/* Check if done */
	cmpb,COND(=),n	%arg2, %r0, fdsync	/* Predict branch taken */

fdoneloop2:
	addib,COND(>)	-1, %arg2, fdoneloop2	/* Outer loop count decr */
	fdce,m		%arg1(%sr1, %arg0)	/* Fdce for one loop */

fdsync:
	syncdma
	sync
	mtsm		%r22			/* restore I-bit */
	bv		%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(flush_data_cache_local)
  272. .align 16
  273. /* Macros to serialize TLB purge operations on SMP. */
	.macro	tlb_lock	la,flags,tmp
#ifdef CONFIG_SMP
	/*
	 * Acquire the global TLB purge spinlock pa_tlb_lock with
	 * interrupts disabled.
	 *   \la    - receives the lock address
	 *   \flags - receives the saved PSW I-bit (for tlb_unlock)
	 *   \tmp   - scratch
	 * ldcw atomically reads the lock word and clears it; a nonzero
	 * result means the lock was free and is now held (3f).  While
	 * contended, spin on plain ldw reads (2b) instead of repeating
	 * the ldcw, retrying the ldcw only once the word goes nonzero.
	 */
	ldil		L%pa_tlb_lock,%r1
	ldo		R%pa_tlb_lock(%r1),\la
	rsm		PSW_SM_I,\flags
1:	LDCW		0(\la),\tmp
	cmpib,<>,n	0,\tmp,3f		/* acquired -> done */
2:	ldw		0(\la),\tmp
	cmpb,<>		%r0,\tmp,1b		/* released -> retry ldcw */
	nop
	b,n		2b
3:
#endif
	.endm
	.macro	tlb_unlock	la,flags,tmp
#ifdef CONFIG_SMP
	/*
	 * Release pa_tlb_lock by storing a nonzero value, then restore
	 * the interrupt state saved by tlb_lock in \flags.
	 */
	ldi		1,\tmp
	stw		\tmp,0(\la)
	mtsm		\flags
#endif
	.endm
  295. /* Clear page using kernel mapping. */
/* Clear page using kernel mapping.  */

ENTRY_CFI(clear_page_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	/*
	 * clear_page_asm(void *page)
	 * %r26 = kernel virtual address of the page to zero.
	 * Zeroes one PAGE_SIZE page with an unrolled store loop:
	 * 128 bytes (16 x std) per iteration on 64-bit, 64 bytes
	 * (16 x stw) on 32-bit.  %r1 is the iteration counter.
	 */
#ifdef CONFIG_64BIT

	/* Unroll the loop.  */
	ldi		(PAGE_SIZE / 128), %r1

1:
	std		%r0, 0(%r26)
	std		%r0, 8(%r26)
	std		%r0, 16(%r26)
	std		%r0, 24(%r26)
	std		%r0, 32(%r26)
	std		%r0, 40(%r26)
	std		%r0, 48(%r26)
	std		%r0, 56(%r26)
	std		%r0, 64(%r26)
	std		%r0, 72(%r26)
	std		%r0, 80(%r26)
	std		%r0, 88(%r26)
	std		%r0, 96(%r26)
	std		%r0, 104(%r26)
	std		%r0, 112(%r26)
	std		%r0, 120(%r26)

	/* Note reverse branch hint for addib is taken.  */
	addib,COND(>),n	-1, %r1, 1b
	ldo		128(%r26), %r26		/* delay slot: advance dest */

#else
	/*
	 * Note that until (if) we start saving the full 64-bit register
	 * values on interrupt, we can't use std on a 32 bit kernel.
	 */
	ldi		(PAGE_SIZE / 64), %r1

1:
	stw		%r0, 0(%r26)
	stw		%r0, 4(%r26)
	stw		%r0, 8(%r26)
	stw		%r0, 12(%r26)
	stw		%r0, 16(%r26)
	stw		%r0, 20(%r26)
	stw		%r0, 24(%r26)
	stw		%r0, 28(%r26)
	stw		%r0, 32(%r26)
	stw		%r0, 36(%r26)
	stw		%r0, 40(%r26)
	stw		%r0, 44(%r26)
	stw		%r0, 48(%r26)
	stw		%r0, 52(%r26)
	stw		%r0, 56(%r26)
	stw		%r0, 60(%r26)

	addib,COND(>),n	-1, %r1, 1b
	ldo		64(%r26), %r26		/* delay slot: advance dest */
#endif
	bv		%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(clear_page_asm)
  354. /* Copy page using kernel mapping. */
/* Copy page using kernel mapping.  */

ENTRY_CFI(copy_page_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	/*
	 * copy_page_asm(void *to, void *from)
	 * %r26 = destination page, %r25 = source page (kernel virtual).
	 * Copies one PAGE_SIZE page.  Loads and stores are interleaved
	 * by hand for dual-issue scheduling; do not reorder casually.
	 */
#ifdef CONFIG_64BIT
	/* PA8x00 CPUs can consume 2 loads or 1 store per cycle.
	 * Unroll the loop by hand and arrange insn appropriately.
	 * Prefetch doesn't improve performance on rp3440.
	 * GCC probably can do this just as well...
	 */
	ldi		(PAGE_SIZE / 128), %r1

1:	ldd		0(%r25), %r19
	ldd		8(%r25), %r20

	ldd		16(%r25), %r21
	ldd		24(%r25), %r22
	std		%r19, 0(%r26)
	std		%r20, 8(%r26)

	ldd		32(%r25), %r19
	ldd		40(%r25), %r20
	std		%r21, 16(%r26)
	std		%r22, 24(%r26)

	ldd		48(%r25), %r21
	ldd		56(%r25), %r22
	std		%r19, 32(%r26)
	std		%r20, 40(%r26)

	ldd		64(%r25), %r19
	ldd		72(%r25), %r20
	std		%r21, 48(%r26)
	std		%r22, 56(%r26)

	ldd		80(%r25), %r21
	ldd		88(%r25), %r22
	std		%r19, 64(%r26)
	std		%r20, 72(%r26)

	ldd		96(%r25), %r19
	ldd		104(%r25), %r20
	std		%r21, 80(%r26)
	std		%r22, 88(%r26)

	ldd		112(%r25), %r21
	ldd		120(%r25), %r22
	ldo		128(%r25), %r25		/* advance source */
	std		%r19, 96(%r26)
	std		%r20, 104(%r26)

	std		%r21, 112(%r26)
	std		%r22, 120(%r26)

	/* Note reverse branch hint for addib is taken.  */
	addib,COND(>),n	-1, %r1, 1b
	ldo		128(%r26), %r26		/* delay slot: advance dest */

#else
	/*
	 * This loop is optimized for PCXL/PCXL2 ldw/ldw and stw/stw
	 * bundles (very restricted rules for bundling).
	 * Note that until (if) we start saving
	 * the full 64 bit register values on interrupt, we can't
	 * use ldd/std on a 32 bit kernel.
	 */
	ldw		0(%r25), %r19		/* prime first load */
	ldi		(PAGE_SIZE / 64), %r1

1:
	ldw		4(%r25), %r20
	ldw		8(%r25), %r21
	ldw		12(%r25), %r22
	stw		%r19, 0(%r26)
	stw		%r20, 4(%r26)
	stw		%r21, 8(%r26)
	stw		%r22, 12(%r26)
	ldw		16(%r25), %r19
	ldw		20(%r25), %r20
	ldw		24(%r25), %r21
	ldw		28(%r25), %r22
	stw		%r19, 16(%r26)
	stw		%r20, 20(%r26)
	stw		%r21, 24(%r26)
	stw		%r22, 28(%r26)
	ldw		32(%r25), %r19
	ldw		36(%r25), %r20
	ldw		40(%r25), %r21
	ldw		44(%r25), %r22
	stw		%r19, 32(%r26)
	stw		%r20, 36(%r26)
	stw		%r21, 40(%r26)
	stw		%r22, 44(%r26)
	ldw		48(%r25), %r19
	ldw		52(%r25), %r20
	ldw		56(%r25), %r21
	ldw		60(%r25), %r22
	stw		%r19, 48(%r26)
	stw		%r20, 52(%r26)
	ldo		64(%r25), %r25		/* advance source */
	stw		%r21, 56(%r26)
	stw		%r22, 60(%r26)
	ldo		64(%r26), %r26		/* advance dest */
	addib,COND(>),n	-1, %r1, 1b
	ldw		0(%r25), %r19		/* delay slot: prime next load */
#endif
	bv		%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(copy_page_asm)
  454. /*
  455. * NOTE: Code in clear_user_page has a hard coded dependency on the
  456. * maximum alias boundary being 4 Mb. We've been assured by the
  457. * parisc chip designers that there will not ever be a parisc
  458. * chip with a larger alias boundary (Never say never :-) ).
  459. *
  460. * Subtle: the dtlb miss handlers support the temp alias region by
  461. * "knowing" that if a dtlb miss happens within the temp alias
  462. * region it must have occurred while in clear_user_page. Since
  463. * this routine makes use of processor local translations, we
  464. * don't want to insert them into the kernel page table. Instead,
  465. * we load up some general registers (they need to be registers
  466. * which aren't shadowed) with the physical page numbers (preshifted
  467. * for tlb insertion) needed to insert the translations. When we
  468. * miss on the translation, the dtlb miss handler inserts the
  469. * translation into the tlb using these values:
  470. *
  471. * %r26 physical page (shifted for tlb insert) of "to" translation
  472. * %r23 physical page (shifted for tlb insert) of "from" translation
  473. */
  474. /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
  475. #define PAGE_ADD_SHIFT (PAGE_SHIFT-12)
	.macro	convert_phys_for_tlb_insert20	phys
	/*
	 * Convert the physical address in \phys (in place) into the
	 * page-frame format expected by iitlbt/idtlbt on PA 2.0:
	 * extract the frame number (adjusted by PAGE_ADD_SHIFT for
	 * page sizes larger than 4k) and, when a default page-size
	 * encoding is configured, deposit it into the low bits.
	 */
	extrd,u		\phys, 56-PAGE_ADD_SHIFT, 32-PAGE_ADD_SHIFT, \phys
#if _PAGE_SIZE_ENCODING_DEFAULT
	depdi		_PAGE_SIZE_ENCODING_DEFAULT, 63, (63-58), \phys
#endif
	.endm
  482. /*
  483. * copy_user_page_asm() performs a page copy using mappings
  484. * equivalent to the user page mappings. It can be used to
  485. * implement copy_user_page() but unfortunately both the `from'
  486. * and `to' pages need to be flushed through mappings equivalent
  487. * to the user mappings after the copy because the kernel accesses
  488. * the `from' page through the kmap kernel mapping and the `to'
  489. * page needs to be flushed since code can be copied. As a
  490. * result, this implementation is less efficient than the simpler
  491. * copy using the kernel mapping. It only needs the `from' page
  492. * to flushed via the user mapping. The kunmap routines handle
  493. * the flushes needed for the kernel mapping.
  494. *
  495. * I'm still keeping this around because it may be possible to
  496. * use it if more information is passed into copy_user_page().
  497. * Have to do some measurements to see if it is worthwhile to
  498. * lobby for such a change.
  499. *
  500. */
ENTRY_CFI(copy_user_page_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	/*
	 * copy_user_page_asm(to, from, vaddr)
	 * %r26 = `to' page, %r25 = `from' page, %r24 = presumably the
	 *   user virtual address that selects the alias colour (it is
	 *   deposited into the tmpalias address below) -- see the block
	 *   comment above clear_user_page for the tmpalias scheme.
	 *
	 * Copies a page through temporary alias mappings congruent with
	 * the user mapping.  The dtlb miss handler builds the needed
	 * translations from %r26 (`to' phys) and %r23 (`from' phys),
	 * which is why those exact registers are used.
	 * %r28 = aliased `to' address, %r29 = aliased `from' address.
	 */

	/* Convert virtual `to' and `from' addresses to physical addresses.
	   Move `from' physical address to non shadowed register.  */
	ldil		L%(__PAGE_OFFSET), %r1
	sub		%r26, %r1, %r26
	sub		%r25, %r1, %r23

	ldil		L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
	depdi		0, 31,32, %r28		/* clear any sign extension */
#endif
	convert_phys_for_tlb_insert20 %r26	/* convert phys addr to tlb insert format */
	convert_phys_for_tlb_insert20 %r23	/* convert phys addr to tlb insert format */
	depd		%r24,63,22, %r28	/* Form aliased virtual address 'to' */
	depdi		0, 63,PAGE_SHIFT, %r28	/* Clear any offset bits */
	copy		%r28, %r29
	depdi		1, 41,1, %r29		/* Form aliased virtual address 'from' */
#else
	extrw,u		%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
	extrw,u		%r23, 24,25, %r23	/* convert phys addr to tlb insert format */
	depw		%r24, 31,22, %r28	/* Form aliased virtual address 'to' */
	depwi		0, 31,PAGE_SHIFT, %r28	/* Clear any offset bits */
	copy		%r28, %r29
	depwi		1, 9,1, %r29		/* Form aliased virtual address 'from' */
#endif

	/* Purge any old translations */

#ifdef CONFIG_PA20
	pdtlb,l		%r0(%r28)
	pdtlb,l		%r0(%r29)
#else
	tlb_lock	%r20,%r21,%r22
	pdtlb		%r0(%r28)
	pdtlb		%r0(%r29)
	tlb_unlock	%r20,%r21,%r22
#endif

#ifdef CONFIG_64BIT
	/* PA8x00 CPUs can consume 2 loads or 1 store per cycle.
	 * Unroll the loop by hand and arrange insn appropriately.
	 * GCC probably can do this just as well.
	 */

	ldd		0(%r29), %r19
	ldi		(PAGE_SIZE / 128), %r1

1:	ldd		8(%r29), %r20
	ldd		16(%r29), %r21
	ldd		24(%r29), %r22
	std		%r19, 0(%r28)
	std		%r20, 8(%r28)

	ldd		32(%r29), %r19
	ldd		40(%r29), %r20
	std		%r21, 16(%r28)
	std		%r22, 24(%r28)

	ldd		48(%r29), %r21
	ldd		56(%r29), %r22
	std		%r19, 32(%r28)
	std		%r20, 40(%r28)

	ldd		64(%r29), %r19
	ldd		72(%r29), %r20
	std		%r21, 48(%r28)
	std		%r22, 56(%r28)

	ldd		80(%r29), %r21
	ldd		88(%r29), %r22
	std		%r19, 64(%r28)
	std		%r20, 72(%r28)

	ldd		96(%r29), %r19
	ldd		104(%r29), %r20
	std		%r21, 80(%r28)
	std		%r22, 88(%r28)

	ldd		112(%r29), %r21
	ldd		120(%r29), %r22
	std		%r19, 96(%r28)
	std		%r20, 104(%r28)

	ldo		128(%r29), %r29
	std		%r21, 112(%r28)
	std		%r22, 120(%r28)
	ldo		128(%r28), %r28

	/* conditional branches nullify on forward taken branch, and on
	 * non-taken backward branch. Note that .+4 is a backwards branch.
	 * The ldd should only get executed if the branch is taken.
	 */
	addib,COND(>),n	-1, %r1, 1b		/* bundle 10 */
	ldd		0(%r29), %r19		/* start next loads */

#else
	ldi		(PAGE_SIZE / 64), %r1

	/*
	 * This loop is optimized for PCXL/PCXL2 ldw/ldw and stw/stw
	 * bundles (very restricted rules for bundling). It probably
	 * does OK on PCXU and better, but we could do better with
	 * ldd/std instructions. Note that until (if) we start saving
	 * the full 64 bit register values on interrupt, we can't
	 * use ldd/std on a 32 bit kernel.
	 */

1:	ldw		0(%r29), %r19
	ldw		4(%r29), %r20
	ldw		8(%r29), %r21
	ldw		12(%r29), %r22
	stw		%r19, 0(%r28)
	stw		%r20, 4(%r28)
	stw		%r21, 8(%r28)
	stw		%r22, 12(%r28)
	ldw		16(%r29), %r19
	ldw		20(%r29), %r20
	ldw		24(%r29), %r21
	ldw		28(%r29), %r22
	stw		%r19, 16(%r28)
	stw		%r20, 20(%r28)
	stw		%r21, 24(%r28)
	stw		%r22, 28(%r28)
	ldw		32(%r29), %r19
	ldw		36(%r29), %r20
	ldw		40(%r29), %r21
	ldw		44(%r29), %r22
	stw		%r19, 32(%r28)
	stw		%r20, 36(%r28)
	stw		%r21, 40(%r28)
	stw		%r22, 44(%r28)
	ldw		48(%r29), %r19
	ldw		52(%r29), %r20
	ldw		56(%r29), %r21
	ldw		60(%r29), %r22
	stw		%r19, 48(%r28)
	stw		%r20, 52(%r28)
	stw		%r21, 56(%r28)
	stw		%r22, 60(%r28)
	ldo		64(%r28), %r28

	addib,COND(>)	-1, %r1,1b
	ldo		64(%r29), %r29		/* delay slot: advance source */
#endif

	bv		%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(copy_user_page_asm)
ENTRY_CFI(clear_user_page_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	/*
	 * clear_user_page_asm(page, vaddr)
	 * %r26 = kernel virtual address of the page (converted to a
	 *   physical frame below; the dtlb miss handler uses %r26 to
	 *   insert the tmpalias translation),
	 * %r25 = presumably the user virtual address selecting the
	 *   alias colour (deposited into the tmpalias address).
	 * Zeroes the page through a temporary alias mapping congruent
	 * with the user mapping.  %r28 = aliased destination address.
	 */
	tophys_r1	%r26

	ldil		L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
	depdi		0, 31,32, %r28		/* clear any sign extension */
#endif
	convert_phys_for_tlb_insert20 %r26	/* convert phys addr to tlb insert format */
	depd		%r25, 63,22, %r28	/* Form aliased virtual address 'to' */
	depdi		0, 63,PAGE_SHIFT, %r28	/* Clear any offset bits */
#else
	extrw,u		%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
	depw		%r25, 31,22, %r28	/* Form aliased virtual address 'to' */
	depwi		0, 31,PAGE_SHIFT, %r28	/* Clear any offset bits */
#endif

	/* Purge any old translation */

#ifdef CONFIG_PA20
	pdtlb,l		%r0(%r28)
#else
	tlb_lock	%r20,%r21,%r22
	pdtlb		%r0(%r28)
	tlb_unlock	%r20,%r21,%r22
#endif

#ifdef CONFIG_64BIT
	ldi		(PAGE_SIZE / 128), %r1

	/* PREFETCH (Write) has not (yet) been proven to help here */
	/* #define	PREFETCHW_OP	ldd		256(%0), %r0 */

1:	std		%r0, 0(%r28)
	std		%r0, 8(%r28)
	std		%r0, 16(%r28)
	std		%r0, 24(%r28)
	std		%r0, 32(%r28)
	std		%r0, 40(%r28)
	std		%r0, 48(%r28)
	std		%r0, 56(%r28)
	std		%r0, 64(%r28)
	std		%r0, 72(%r28)
	std		%r0, 80(%r28)
	std		%r0, 88(%r28)
	std		%r0, 96(%r28)
	std		%r0, 104(%r28)
	std		%r0, 112(%r28)
	std		%r0, 120(%r28)
	addib,COND(>)	-1, %r1, 1b
	ldo		128(%r28), %r28		/* delay slot: advance dest */

#else	/* ! CONFIG_64BIT */
	ldi		(PAGE_SIZE / 64), %r1

1:	stw		%r0, 0(%r28)
	stw		%r0, 4(%r28)
	stw		%r0, 8(%r28)
	stw		%r0, 12(%r28)
	stw		%r0, 16(%r28)
	stw		%r0, 20(%r28)
	stw		%r0, 24(%r28)
	stw		%r0, 28(%r28)
	stw		%r0, 32(%r28)
	stw		%r0, 36(%r28)
	stw		%r0, 40(%r28)
	stw		%r0, 44(%r28)
	stw		%r0, 48(%r28)
	stw		%r0, 52(%r28)
	stw		%r0, 56(%r28)
	stw		%r0, 60(%r28)
	addib,COND(>)	-1, %r1, 1b
	ldo		64(%r28), %r28		/* delay slot: advance dest */
#endif	/* CONFIG_64BIT */

	bv		%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(clear_user_page_asm)
  710. ENTRY_CFI(flush_dcache_page_asm)
  711. .proc
  712. .callinfo NO_CALLS
  713. .entry
  714. ldil L%(TMPALIAS_MAP_START), %r28
  715. #ifdef CONFIG_64BIT
  716. #if (TMPALIAS_MAP_START >= 0x80000000)
  717. depdi 0, 31,32, %r28 /* clear any sign extension */
  718. #endif
  719. convert_phys_for_tlb_insert20 %r26 /* convert phys addr to tlb insert format */
  720. depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */
  721. depdi 0, 63,PAGE_SHIFT, %r28 /* Clear any offset bits */
  722. #else
  723. extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */
  724. depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */
  725. depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */
  726. #endif
  727. /* Purge any old translation */
  728. #ifdef CONFIG_PA20
  729. pdtlb,l %r0(%r28)
  730. #else
  731. tlb_lock %r20,%r21,%r22
  732. pdtlb %r0(%r28)
  733. tlb_unlock %r20,%r21,%r22
  734. #endif
  735. ldil L%dcache_stride, %r1
  736. ldw R%dcache_stride(%r1), r31
  737. #ifdef CONFIG_64BIT
  738. depdi,z 1, 63-PAGE_SHIFT,1, %r25
  739. #else
  740. depwi,z 1, 31-PAGE_SHIFT,1, %r25
  741. #endif
  742. add %r28, %r25, %r25
  743. sub %r25, r31, %r25
  744. 1: fdc,m r31(%r28)
  745. fdc,m r31(%r28)
  746. fdc,m r31(%r28)
  747. fdc,m r31(%r28)
  748. fdc,m r31(%r28)
  749. fdc,m r31(%r28)
  750. fdc,m r31(%r28)
  751. fdc,m r31(%r28)
  752. fdc,m r31(%r28)
  753. fdc,m r31(%r28)
  754. fdc,m r31(%r28)
  755. fdc,m r31(%r28)
  756. fdc,m r31(%r28)
  757. fdc,m r31(%r28)
  758. fdc,m r31(%r28)
  759. cmpb,COND(<<) %r28, %r25,1b
  760. fdc,m r31(%r28)
  761. sync
  762. bv %r0(%r2)
  763. nop
  764. .exit
  765. .procend
  766. ENDPROC_CFI(flush_dcache_page_asm)
ENTRY_CFI(flush_icache_page_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	/*
	 * flush_icache_page_asm(phys, vaddr)
	 * %r26 = physical page address, %r25 = presumably the user
	 *   virtual address selecting the alias colour (deposited into
	 *   the tmpalias address below).
	 * Flushes one page from the I-cache via a tmpalias mapping
	 * congruent with the user mapping.
	 * %r28 = aliased address (loop cursor), %r31 = icache line
	 * stride, %r25 is reused as the end address.
	 */
	ldil		L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
	depdi		0, 31,32, %r28		/* clear any sign extension */
#endif
	convert_phys_for_tlb_insert20 %r26	/* convert phys addr to tlb insert format */
	depd		%r25, 63,22, %r28	/* Form aliased virtual address 'to' */
	depdi		0, 63,PAGE_SHIFT, %r28	/* Clear any offset bits */
#else
	extrw,u		%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
	depw		%r25, 31,22, %r28	/* Form aliased virtual address 'to' */
	depwi		0, 31,PAGE_SHIFT, %r28	/* Clear any offset bits */
#endif

	/* Purge any old translation.  Note that the FIC instruction
	 * may use either the instruction or data TLB.  Given that we
	 * have a flat address space, it's not clear which TLB will be
	 * used.  So, we purge both entries.  */

#ifdef CONFIG_PA20
	pdtlb,l		%r0(%r28)
	pitlb,l         %r0(%sr4,%r28)
#else
	tlb_lock        %r20,%r21,%r22
	pdtlb		%r0(%r28)
	pitlb           %r0(%sr4,%r28)
	tlb_unlock      %r20,%r21,%r22
#endif

	ldil		L%icache_stride, %r1
	ldw		R%icache_stride(%r1), %r31

#ifdef CONFIG_64BIT
	depdi,z		1, 63-PAGE_SHIFT,1, %r25	/* %r25 = PAGE_SIZE */
#else
	depwi,z		1, 31-PAGE_SHIFT,1, %r25	/* %r25 = PAGE_SIZE */
#endif
	add		%r28, %r25, %r25
	sub		%r25, %r31, %r25	/* %r25 = last line of page */

	/* fic only has the type 26 form on PA1.1, requiring an
	 * explicit space specification, so use %sr4 */
	/* 16 fic per iteration: 15 here plus one in the cmpb delay slot */
1:      fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	cmpb,COND(<<)	%r28, %r25,1b		/* loop while below end addr */
	fic,m		%r31(%sr4,%r28)

	sync
	bv		%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(flush_icache_page_asm)
/*
 * flush_kernel_dcache_page_asm(vaddr)
 *
 * Flush (write back and invalidate) one page of the data cache,
 * addressed through an existing kernel mapping.
 *
 * In:       %r26 = kernel virtual address of the page (page aligned)
 * Scratch:  %r23 = dcache line stride, %r25 = end address
 */
831. ENTRY_CFI(flush_kernel_dcache_page_asm)
832. .proc
833. .callinfo NO_CALLS
834. .entry
835. ldil L%dcache_stride, %r1
836. ldw R%dcache_stride(%r1), %r23
/* Build PAGE_SIZE in %r25 (1 << PAGE_SHIFT via dep*,z) ... */
837. #ifdef CONFIG_64BIT
838. depdi,z 1, 63-PAGE_SHIFT,1, %r25
839. #else
840. depwi,z 1, 31-PAGE_SHIFT,1, %r25
841. #endif
/* ... end address = start + PAGE_SIZE - stride; the final line is
 * flushed by the fdc in the branch delay slot below. */
842. add %r26, %r25, %r25
843. sub %r25, %r23, %r25
/* Unrolled x16: each fdc,m flushes one line, post-incrementing %r26 */
844. 1: fdc,m %r23(%r26)
845. fdc,m %r23(%r26)
846. fdc,m %r23(%r26)
847. fdc,m %r23(%r26)
848. fdc,m %r23(%r26)
849. fdc,m %r23(%r26)
850. fdc,m %r23(%r26)
851. fdc,m %r23(%r26)
852. fdc,m %r23(%r26)
853. fdc,m %r23(%r26)
854. fdc,m %r23(%r26)
855. fdc,m %r23(%r26)
856. fdc,m %r23(%r26)
857. fdc,m %r23(%r26)
858. fdc,m %r23(%r26)
859. cmpb,COND(<<) %r26, %r25,1b
860. fdc,m %r23(%r26) /* 16th flush in the branch delay slot */
861. sync /* ensure flushes complete before returning */
862. bv %r0(%r2)
863. nop
864. .exit
865. .procend
866. ENDPROC_CFI(flush_kernel_dcache_page_asm)
/*
 * purge_kernel_dcache_page_asm(vaddr)
 *
 * Purge one page from the data cache via a kernel mapping.  Identical
 * loop structure to flush_kernel_dcache_page_asm, but uses pdc
 * (purge data cache) instead of fdc — pdc invalidates lines without
 * writing dirty data back to memory.
 *
 * In:       %r26 = kernel virtual address of the page (page aligned)
 * Scratch:  %r23 = dcache line stride, %r25 = end address
 */
867. ENTRY_CFI(purge_kernel_dcache_page_asm)
868. .proc
869. .callinfo NO_CALLS
870. .entry
871. ldil L%dcache_stride, %r1
872. ldw R%dcache_stride(%r1), %r23
/* Build PAGE_SIZE in %r25 ... */
873. #ifdef CONFIG_64BIT
874. depdi,z 1, 63-PAGE_SHIFT,1, %r25
875. #else
876. depwi,z 1, 31-PAGE_SHIFT,1, %r25
877. #endif
/* ... end address = start + PAGE_SIZE - stride (last line purged by
 * the delay-slot pdc below). */
878. add %r26, %r25, %r25
879. sub %r25, %r23, %r25
/* Unrolled x16: each pdc,m purges one line, post-incrementing %r26 */
880. 1: pdc,m %r23(%r26)
881. pdc,m %r23(%r26)
882. pdc,m %r23(%r26)
883. pdc,m %r23(%r26)
884. pdc,m %r23(%r26)
885. pdc,m %r23(%r26)
886. pdc,m %r23(%r26)
887. pdc,m %r23(%r26)
888. pdc,m %r23(%r26)
889. pdc,m %r23(%r26)
890. pdc,m %r23(%r26)
891. pdc,m %r23(%r26)
892. pdc,m %r23(%r26)
893. pdc,m %r23(%r26)
894. pdc,m %r23(%r26)
895. cmpb,COND(<<) %r26, %r25, 1b
896. pdc,m %r23(%r26) /* 16th purge in the branch delay slot */
897. sync
898. bv %r0(%r2)
899. nop
900. .exit
901. .procend
902. ENDPROC_CFI(purge_kernel_dcache_page_asm)
/*
 * flush_user_dcache_range_asm(start, end)
 *
 * Flush a user-space virtual address range from the data cache,
 * accessing it through user space register %sr3.
 *
 * In:       %r26 = start address, %r25 = end address (exclusive)
 * Scratch:  %r23 = dcache line stride, %r21 = stride - 1 (align mask)
 */
903. ENTRY_CFI(flush_user_dcache_range_asm)
904. .proc
905. .callinfo NO_CALLS
906. .entry
907. ldil L%dcache_stride, %r1
908. ldw R%dcache_stride(%r1), %r23
/* Align the start address down to a cache-line boundary */
909. ldo -1(%r23), %r21
910. ANDCM %r26, %r21, %r26
/* Loop while %r26 < %r25; fdc,m in the delay slot flushes one line and
 * advances %r26.  ,n nullifies that fdc when the backward branch is
 * not taken, so no extra line is flushed on exit. */
911. 1: cmpb,COND(<<),n %r26, %r25, 1b
912. fdc,m %r23(%sr3, %r26)
913. sync
914. bv %r0(%r2)
915. nop
916. .exit
917. .procend
918. ENDPROC_CFI(flush_user_dcache_range_asm)
/*
 * flush_kernel_dcache_range_asm(start, end)
 *
 * Flush a kernel virtual address range from the data cache.
 *
 * In:       %r26 = start address, %r25 = end address (exclusive)
 * Scratch:  %r23 = dcache line stride, %r21 = stride - 1 (align mask)
 */
919. ENTRY_CFI(flush_kernel_dcache_range_asm)
920. .proc
921. .callinfo NO_CALLS
922. .entry
923. ldil L%dcache_stride, %r1
924. ldw R%dcache_stride(%r1), %r23
/* Align start down to a cache-line boundary */
925. ldo -1(%r23), %r21
926. ANDCM %r26, %r21, %r26
/* Loop while %r26 < %r25; delay-slot fdc,m flushes a line and advances
 * %r26; ,n skips the fdc once the loop falls through. */
927. 1: cmpb,COND(<<),n %r26, %r25,1b
928. fdc,m %r23(%r26)
929. sync
930. syncdma /* also drain DMA before returning (kernel range may be a DMA buffer) */
931. bv %r0(%r2)
932. nop
933. .exit
934. .procend
935. ENDPROC_CFI(flush_kernel_dcache_range_asm)
/*
 * flush_user_icache_range_asm(start, end)
 *
 * Flush a user-space virtual address range from the instruction cache,
 * accessing it through user space register %sr3.
 *
 * In:       %r26 = start address, %r25 = end address (exclusive)
 * Scratch:  %r23 = icache line stride, %r21 = stride - 1 (align mask)
 */
936. ENTRY_CFI(flush_user_icache_range_asm)
937. .proc
938. .callinfo NO_CALLS
939. .entry
940. ldil L%icache_stride, %r1
941. ldw R%icache_stride(%r1), %r23
/* Align start down to a cache-line boundary */
942. ldo -1(%r23), %r21
943. ANDCM %r26, %r21, %r26
/* Loop while %r26 < %r25; delay-slot fic,m flushes a line and advances
 * %r26; ,n skips the fic on loop exit. */
944. 1: cmpb,COND(<<),n %r26, %r25,1b
945. fic,m %r23(%sr3, %r26)
946. sync
947. bv %r0(%r2)
948. nop
949. .exit
950. .procend
951. ENDPROC_CFI(flush_user_icache_range_asm)
/*
 * flush_kernel_icache_page(vaddr)
 *
 * Flush one page from the instruction cache through a kernel mapping.
 * Uses %sr4 as the explicit space register for fic (the PA1.1 form of
 * fic requires one — see flush_icache_page_asm).
 *
 * In:       %r26 = kernel virtual address of the page (page aligned)
 * Scratch:  %r23 = icache line stride, %r25 = end address
 */
952. ENTRY_CFI(flush_kernel_icache_page)
953. .proc
954. .callinfo NO_CALLS
955. .entry
956. ldil L%icache_stride, %r1
957. ldw R%icache_stride(%r1), %r23
/* Build PAGE_SIZE in %r25 ... */
958. #ifdef CONFIG_64BIT
959. depdi,z 1, 63-PAGE_SHIFT,1, %r25
960. #else
961. depwi,z 1, 31-PAGE_SHIFT,1, %r25
962. #endif
/* ... end address = start + PAGE_SIZE - stride (last line flushed by
 * the delay-slot fic below). */
963. add %r26, %r25, %r25
964. sub %r25, %r23, %r25
/* Unrolled x16: each fic,m flushes one line, post-incrementing %r26 */
965. 1: fic,m %r23(%sr4, %r26)
966. fic,m %r23(%sr4, %r26)
967. fic,m %r23(%sr4, %r26)
968. fic,m %r23(%sr4, %r26)
969. fic,m %r23(%sr4, %r26)
970. fic,m %r23(%sr4, %r26)
971. fic,m %r23(%sr4, %r26)
972. fic,m %r23(%sr4, %r26)
973. fic,m %r23(%sr4, %r26)
974. fic,m %r23(%sr4, %r26)
975. fic,m %r23(%sr4, %r26)
976. fic,m %r23(%sr4, %r26)
977. fic,m %r23(%sr4, %r26)
978. fic,m %r23(%sr4, %r26)
979. fic,m %r23(%sr4, %r26)
980. cmpb,COND(<<) %r26, %r25, 1b
981. fic,m %r23(%sr4, %r26) /* 16th flush in the branch delay slot */
982. sync
983. bv %r0(%r2)
984. nop
985. .exit
986. .procend
987. ENDPROC_CFI(flush_kernel_icache_page)
/*
 * flush_kernel_icache_range_asm(start, end)
 *
 * Flush a kernel virtual address range from the instruction cache
 * (explicit %sr4 space for the fic instruction).
 *
 * In:       %r26 = start address, %r25 = end address (exclusive)
 * Scratch:  %r23 = icache line stride, %r21 = stride - 1 (align mask)
 */
988. ENTRY_CFI(flush_kernel_icache_range_asm)
989. .proc
990. .callinfo NO_CALLS
991. .entry
992. ldil L%icache_stride, %r1
993. ldw R%icache_stride(%r1), %r23
/* Align start down to a cache-line boundary */
994. ldo -1(%r23), %r21
995. ANDCM %r26, %r21, %r26
/* Loop while %r26 < %r25; delay-slot fic,m flushes a line and advances
 * %r26; ,n skips the fic on loop exit. */
996. 1: cmpb,COND(<<),n %r26, %r25, 1b
997. fic,m %r23(%sr4, %r26)
998. sync
999. bv %r0(%r2)
1000. nop
1001. .exit
1002. .procend
1003. ENDPROC_CFI(flush_kernel_icache_range_asm)
1004. /* align should cover use of rfi in disable_sr_hashing_asm and
1005. * srdis_done.
1006. */
1007. .align 256
/*
 * disable_sr_hashing_asm(cpu_type)
 *
 * Disable space-register hashing in the CPU's diagnose registers.
 * The diagnose accesses (mfdiag/mtdiag) are hand-encoded as .word
 * values because the assembler has no mnemonics for them; the routine
 * switches to real mode (via rfi) around them, then back to virtual
 * mode before returning.
 *
 * In: %r26 = CPU type selector (SRHASH_PCXST / SRHASH_PCXL /
 *     SRHASH_PA20); any other value is a no-op.
 * Clobbers: %r1, %r28, PSW/IIA queue state during the mode switches.
 */
1008. ENTRY_CFI(disable_sr_hashing_asm)
1009. .proc
1010. .callinfo NO_CALLS
1011. .entry
1012. /*
1013. * Switch to real mode
1014. */
1015. /* pcxt_ssm_bug */
1016. rsm PSW_SM_I, %r0
1017. load32 PA(1f), %r1
/* nop padding after rsm — see the pcxt_ssm_bug note above; presumably
 * a workaround for rsm/ssm pipeline errata on PCXT — confirm against
 * the CPU errata docs. */
1018. nop
1019. nop
1020. nop
1021. nop
1022. nop
/* Reload the instruction-address queues so execution resumes at the
 * physical address of 1f with the real-mode PSW. */
1023. rsm PSW_SM_Q, %r0 /* prep to load iia queue */
1024. mtctl %r0, %cr17 /* Clear IIASQ tail */
1025. mtctl %r0, %cr17 /* Clear IIASQ head */
1026. mtctl %r1, %cr18 /* IIAOQ head */
1027. ldo 4(%r1), %r1
1028. mtctl %r1, %cr18 /* IIAOQ tail */
1029. load32 REAL_MODE_PSW, %r1
1030. mtctl %r1, %ipsw
1031. rfi
1032. nop
/* Now in real mode: dispatch on the CPU type in %r26 */
1033. 1: cmpib,=,n SRHASH_PCXST, %r26,srdis_pcxs
1034. cmpib,=,n SRHASH_PCXL, %r26,srdis_pcxl
1035. cmpib,=,n SRHASH_PA20, %r26,srdis_pa20
1036. b,n srdis_done
1037. srdis_pcxs:
1038. /* Disable Space Register Hashing for PCXS,PCXT,PCXT' */
1039. .word 0x141c1a00 /* mfdiag %dr0, %r28 */
1040. .word 0x141c1a00 /* must issue twice */
1041. depwi 0,18,1, %r28 /* Clear DHE (dcache hash enable) */
1042. depwi 0,20,1, %r28 /* Clear IHE (icache hash enable) */
1043. .word 0x141c1600 /* mtdiag %r28, %dr0 */
1044. .word 0x141c1600 /* must issue twice */
1045. b,n srdis_done
1046. srdis_pcxl:
1047. /* Disable Space Register Hashing for PCXL */
1048. .word 0x141c0600 /* mfdiag %dr0, %r28 */
1049. depwi 0,28,2, %r28 /* Clear DHASH_EN & IHASH_EN */
1050. .word 0x141c0240 /* mtdiag %r28, %dr0 */
1051. b,n srdis_done
1052. srdis_pa20:
1053. /* Disable Space Register Hashing for PCXU,PCXU+,PCXW,PCXW+,PCXW2 */
1054. .word 0x144008bc /* mfdiag %dr2, %r28 */
1055. depdi 0, 54,1, %r28 /* clear DIAG_SPHASH_ENAB (bit 54) */
1056. .word 0x145c1840 /* mtdiag %r28, %dr2 */
1057. srdis_done:
1058. /* Switch back to virtual mode */
/* Mirror image of the entry sequence: reload the IIA queues with the
 * virtual address of 2f and restore the normal kernel PSW. */
1059. rsm PSW_SM_I, %r0 /* prep to load iia queue */
1060. load32 2f, %r1
1061. nop
1062. nop
1063. nop
1064. nop
1065. nop
1066. rsm PSW_SM_Q, %r0 /* prep to load iia queue */
1067. mtctl %r0, %cr17 /* Clear IIASQ tail */
1068. mtctl %r0, %cr17 /* Clear IIASQ head */
1069. mtctl %r1, %cr18 /* IIAOQ head */
1070. ldo 4(%r1), %r1
1071. mtctl %r1, %cr18 /* IIAOQ tail */
1072. load32 KERNEL_PSW, %r1
1073. mtctl %r1, %ipsw
1074. rfi
1075. nop
1076. 2: bv %r0(%r2)
1077. nop
1078. .exit
1079. .procend
1080. ENDPROC_CFI(disable_sr_hashing_asm)
1081. .end