/*
 * PARISC TLB and cache flushing support
 * Copyright (C) 2000-2001 Hewlett-Packard (John Marvin)
 * Copyright (C) 2001 Matthew Wilcox (willy at parisc-linux.org)
 * Copyright (C) 2002 Richard Hirst (rhirst with parisc-linux.org)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
/*
 * NOTE: fdc, fic, and pdc instructions that use base register modification
 * should only use index and base registers that are not shadowed,
 * so that the fast path emulation in the non-access miss handler
 * can be used.
 */
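/*
 * (For reference: on PA 2.0 the shadowed general registers are
 * %r1, %r8, %r9, %r16, %r17, %r24 and %r25.)
 */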
#ifdef CONFIG_64BIT
        .level  2.0w
#else
        .level  2.0
#endif

#include <asm/psw.h>
#include <asm/assembly.h>
#include <asm/pgtable.h>
#include <asm/cache.h>
#include <linux/linkage.h>

        .text
        .align  128
ENTRY(flush_tlb_all_local)
        .proc
        .callinfo NO_CALLS
        .entry
        /*
         * The pitlbe and pdtlbe instructions should only be used to
         * flush the entire tlb.  Also, there needs to be no intervening
         * tlb operations, e.g. tlb misses, so the operation needs
         * to happen in real mode with all interruptions disabled.
         */
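        /*
         * Real-mode entry recipe: queue the physical address of 1f in
         * the instruction address queues (IIASQ/IIAOQ head and tail),
         * load REAL_MODE_PSW into %ipsw, and rfi to it.
         */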
        /* pcxt_ssm_bug - relied upon translation! PA 2.0 Arch. F-4 and F-5 */
        rsm     PSW_SM_I, %r19          /* save I-bit state */
        load32  PA(1f), %r1
        nop
        nop
        nop
        nop
        nop

        rsm     PSW_SM_Q, %r0           /* prep to load iia queue */
        mtctl   %r0, %cr17              /* Clear IIASQ tail */
        mtctl   %r0, %cr17              /* Clear IIASQ head */
        mtctl   %r1, %cr18              /* IIAOQ head */
        ldo     4(%r1), %r1
        mtctl   %r1, %cr18              /* IIAOQ tail */
        load32  REAL_MODE_PSW, %r1
        mtctl   %r1, %ipsw
        rfi
        nop
1:      load32  PA(cache_info), %r1

        /* Flush Instruction Tlb */

        LDREG   ITLB_SID_BASE(%r1), %r20
        LDREG   ITLB_SID_STRIDE(%r1), %r21
        LDREG   ITLB_SID_COUNT(%r1), %r22
        LDREG   ITLB_OFF_BASE(%r1), %arg0
        LDREG   ITLB_OFF_STRIDE(%r1), %arg1
        LDREG   ITLB_OFF_COUNT(%r1), %arg2
        LDREG   ITLB_LOOP(%r1), %arg3
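        /*
         * cache_info (from PDC) describes the TLB as SID_COUNT spaces,
         * SID_STRIDE apart starting at SID_BASE; within each space,
         * OFF_COUNT entries OFF_STRIDE apart starting at OFF_BASE.
         * LOOP is the number of pitlbe's needed per entry.
         */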
        addib,COND(=)   -1, %arg3, fitoneloop   /* Preadjust and test */
        movb,<,n %arg3, %r31, fitdone           /* If loop < 0, skip */
        copy    %arg0, %r28                     /* Init base addr */

fitmanyloop:                                    /* Loop if LOOP >= 2 */
        mtsp    %r20, %sr1
        add     %r21, %r20, %r20                /* increment space */
        copy    %arg2, %r29                     /* Init middle loop count */

fitmanymiddle:                                  /* Loop if LOOP >= 2 */
        addib,COND(>)   -1, %r31, fitmanymiddle /* Adjusted inner loop decr */
        pitlbe  0(%sr1, %r28)
        pitlbe,m %arg1(%sr1, %r28)              /* Last pitlbe and addr adjust */
        addib,COND(>)   -1, %r29, fitmanymiddle /* Middle loop decr */
        copy    %arg3, %r31                     /* Re-init inner loop count */

        movb,tr %arg0, %r28, fitmanyloop        /* Re-init base addr */
        addib,COND(<=),n -1, %r22, fitdone      /* Outer loop count decr */

fitoneloop:                                     /* Loop if LOOP = 1 */
        mtsp    %r20, %sr1
        copy    %arg0, %r28                     /* init base addr */
        copy    %arg2, %r29                     /* init middle loop count */

fitonemiddle:                                   /* Loop if LOOP = 1 */
        addib,COND(>)   -1, %r29, fitonemiddle  /* Middle loop count decr */
        pitlbe,m %arg1(%sr1, %r28)              /* pitlbe for one loop */

        addib,COND(>)   -1, %r22, fitoneloop    /* Outer loop count decr */
        add     %r21, %r20, %r20                /* increment space */

fitdone:
        /* Flush Data Tlb */

        LDREG   DTLB_SID_BASE(%r1), %r20
        LDREG   DTLB_SID_STRIDE(%r1), %r21
        LDREG   DTLB_SID_COUNT(%r1), %r22
        LDREG   DTLB_OFF_BASE(%r1), %arg0
        LDREG   DTLB_OFF_STRIDE(%r1), %arg1
        LDREG   DTLB_OFF_COUNT(%r1), %arg2
        LDREG   DTLB_LOOP(%r1), %arg3

        addib,COND(=)   -1, %arg3, fdtoneloop   /* Preadjust and test */
        movb,<,n %arg3, %r31, fdtdone           /* If loop < 0, skip */
        copy    %arg0, %r28                     /* Init base addr */

fdtmanyloop:                                    /* Loop if LOOP >= 2 */
        mtsp    %r20, %sr1
        add     %r21, %r20, %r20                /* increment space */
        copy    %arg2, %r29                     /* Init middle loop count */

fdtmanymiddle:                                  /* Loop if LOOP >= 2 */
        addib,COND(>)   -1, %r31, fdtmanymiddle /* Adjusted inner loop decr */
        pdtlbe  0(%sr1, %r28)
        pdtlbe,m %arg1(%sr1, %r28)              /* Last pdtlbe and addr adjust */
        addib,COND(>)   -1, %r29, fdtmanymiddle /* Middle loop decr */
        copy    %arg3, %r31                     /* Re-init inner loop count */

        movb,tr %arg0, %r28, fdtmanyloop        /* Re-init base addr */
        addib,COND(<=),n -1, %r22, fdtdone      /* Outer loop count decr */

fdtoneloop:                                     /* Loop if LOOP = 1 */
        mtsp    %r20, %sr1
        copy    %arg0, %r28                     /* init base addr */
        copy    %arg2, %r29                     /* init middle loop count */

fdtonemiddle:                                   /* Loop if LOOP = 1 */
        addib,COND(>)   -1, %r29, fdtonemiddle  /* Middle loop count decr */
        pdtlbe,m %arg1(%sr1, %r28)              /* pdtlbe for one loop */

        addib,COND(>)   -1, %r22, fdtoneloop    /* Outer loop count decr */
        add     %r21, %r20, %r20                /* increment space */

fdtdone:
        /*
         * Switch back to virtual mode
         */
        /* pcxt_ssm_bug */
        rsm     PSW_SM_I, %r0
        load32  2f, %r1
        nop
        nop
        nop
        nop
        nop

        rsm     PSW_SM_Q, %r0           /* prep to load iia queue */
        mtctl   %r0, %cr17              /* Clear IIASQ tail */
        mtctl   %r0, %cr17              /* Clear IIASQ head */
        mtctl   %r1, %cr18              /* IIAOQ head */
        ldo     4(%r1), %r1
        mtctl   %r1, %cr18              /* IIAOQ tail */
        load32  KERNEL_PSW, %r1
        or      %r1, %r19, %r1          /* I-bit to state on entry */
        mtctl   %r1, %ipsw              /* restore I-bit (entire PSW) */
        rfi
        nop

2:      bv      %r0(%r2)
        nop

        .exit
        .procend
ENDPROC(flush_tlb_all_local)
        .import cache_info, data

ENTRY(flush_instruction_cache_local)
        .proc
        .callinfo NO_CALLS
        .entry

        load32  cache_info, %r1

        /* Flush Instruction Cache */

        LDREG   ICACHE_BASE(%r1), %arg0
        LDREG   ICACHE_STRIDE(%r1), %arg1
        LDREG   ICACHE_COUNT(%r1), %arg2
        LDREG   ICACHE_LOOP(%r1), %arg3

        rsm     PSW_SM_I, %r22          /* No mmgt ops during loop */
        mtsp    %r0, %sr1
        addib,COND(=)   -1, %arg3, fioneloop    /* Preadjust and test */
        movb,<,n %arg3, %r31, fisync            /* If loop < 0, do sync */
fimanyloop:                                     /* Loop if LOOP >= 2 */
        addib,COND(>)   -1, %r31, fimanyloop    /* Adjusted inner loop decr */
        fice    %r0(%sr1, %arg0)
        fice,m  %arg1(%sr1, %arg0)              /* Last fice and addr adjust */
        movb,tr %arg3, %r31, fimanyloop         /* Re-init inner loop count */
        addib,COND(<=),n -1, %arg2, fisync      /* Outer loop decr */

fioneloop:                                      /* Loop if LOOP = 1 */
        /* Some implementations may flush with a single fice instruction */
        cmpib,COND(>>=),n 15, %arg2, fioneloop2
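        /* A count of 15 or fewer goes straight to the single-fice loop
         * at fioneloop2; larger counts flush 16 lines per pass below,
         * where a few extra fice's past the end are harmless. */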
fioneloop1:
        fice,m  %arg1(%sr1, %arg0)
        fice,m  %arg1(%sr1, %arg0)
        fice,m  %arg1(%sr1, %arg0)
        fice,m  %arg1(%sr1, %arg0)
        fice,m  %arg1(%sr1, %arg0)
        fice,m  %arg1(%sr1, %arg0)
        fice,m  %arg1(%sr1, %arg0)
        fice,m  %arg1(%sr1, %arg0)
        fice,m  %arg1(%sr1, %arg0)
        fice,m  %arg1(%sr1, %arg0)
        fice,m  %arg1(%sr1, %arg0)
        fice,m  %arg1(%sr1, %arg0)
        fice,m  %arg1(%sr1, %arg0)
        fice,m  %arg1(%sr1, %arg0)
        fice,m  %arg1(%sr1, %arg0)
        addib,COND(>)   -16, %arg2, fioneloop1
        fice,m  %arg1(%sr1, %arg0)

        /* Check if done */
        cmpb,COND(=),n  %arg2, %r0, fisync      /* Predict branch taken */

fioneloop2:
        addib,COND(>)   -1, %arg2, fioneloop2   /* Outer loop count decr */
        fice,m  %arg1(%sr1, %arg0)              /* Fice for one loop */

fisync:
        sync
        mtsm    %r22                            /* restore I-bit */
        bv      %r0(%r2)
        nop
        .exit
        .procend
ENDPROC(flush_instruction_cache_local)
        .import cache_info, data

ENTRY(flush_data_cache_local)
        .proc
        .callinfo NO_CALLS
        .entry

        load32  cache_info, %r1

        /* Flush Data Cache */

        LDREG   DCACHE_BASE(%r1), %arg0
        LDREG   DCACHE_STRIDE(%r1), %arg1
        LDREG   DCACHE_COUNT(%r1), %arg2
        LDREG   DCACHE_LOOP(%r1), %arg3

        rsm     PSW_SM_I, %r22          /* No mmgt ops during loop */
        mtsp    %r0, %sr1
        addib,COND(=)   -1, %arg3, fdoneloop    /* Preadjust and test */
        movb,<,n %arg3, %r31, fdsync            /* If loop < 0, do sync */

fdmanyloop:                                     /* Loop if LOOP >= 2 */
        addib,COND(>)   -1, %r31, fdmanyloop    /* Adjusted inner loop decr */
        fdce    %r0(%sr1, %arg0)
        fdce,m  %arg1(%sr1, %arg0)              /* Last fdce and addr adjust */
        movb,tr %arg3, %r31, fdmanyloop         /* Re-init inner loop count */
        addib,COND(<=),n -1, %arg2, fdsync      /* Outer loop decr */

fdoneloop:                                      /* Loop if LOOP = 1 */
        /* Some implementations may flush with a single fdce instruction */
        cmpib,COND(>>=),n 15, %arg2, fdoneloop2

fdoneloop1:
        fdce,m  %arg1(%sr1, %arg0)
        fdce,m  %arg1(%sr1, %arg0)
        fdce,m  %arg1(%sr1, %arg0)
        fdce,m  %arg1(%sr1, %arg0)
        fdce,m  %arg1(%sr1, %arg0)
        fdce,m  %arg1(%sr1, %arg0)
        fdce,m  %arg1(%sr1, %arg0)
        fdce,m  %arg1(%sr1, %arg0)
        fdce,m  %arg1(%sr1, %arg0)
        fdce,m  %arg1(%sr1, %arg0)
        fdce,m  %arg1(%sr1, %arg0)
        fdce,m  %arg1(%sr1, %arg0)
        fdce,m  %arg1(%sr1, %arg0)
        fdce,m  %arg1(%sr1, %arg0)
        fdce,m  %arg1(%sr1, %arg0)
        addib,COND(>)   -16, %arg2, fdoneloop1
        fdce,m  %arg1(%sr1, %arg0)

        /* Check if done */
        cmpb,COND(=),n  %arg2, %r0, fdsync      /* Predict branch taken */

fdoneloop2:
        addib,COND(>)   -1, %arg2, fdoneloop2   /* Outer loop count decr */
        fdce,m  %arg1(%sr1, %arg0)              /* Fdce for one loop */

fdsync:
        syncdma
        sync
        mtsm    %r22                            /* restore I-bit */
        bv      %r0(%r2)
        nop
        .exit
        .procend
ENDPROC(flush_data_cache_local)
        .align  16

/* Macros to serialize TLB purge operations on SMP.  */
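/*
 * pa_tlb_lock uses the usual parisc ldcw convention: the lock word
 * is 1 when free and 0 when held.  LDCW atomically loads the word
 * and clears it, so a non-zero result means the lock was free and
 * is now ours; otherwise we spin with plain loads until the word
 * reads non-zero, then try the LDCW again.
 */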
        .macro  tlb_lock        la,flags,tmp
#ifdef CONFIG_SMP
        ldil    L%pa_tlb_lock, %r1
        ldo     R%pa_tlb_lock(%r1), \la
        rsm     PSW_SM_I, \flags
1:      LDCW    0(\la), \tmp
        cmpib,<>,n 0, \tmp, 3f
2:      ldw     0(\la), \tmp
        cmpb,<> %r0, \tmp, 1b
        nop
        b,n     2b
3:
#endif
        .endm

        .macro  tlb_unlock      la,flags,tmp
#ifdef CONFIG_SMP
        ldi     1, \tmp
        stw     \tmp, 0(\la)
        mtsm    \flags
#endif
        .endm
/* Clear page using kernel mapping.  */

ENTRY(clear_page_asm)
        .proc
        .callinfo NO_CALLS
        .entry

#ifdef CONFIG_64BIT

        /* Unroll the loop.  */
        ldi     (PAGE_SIZE / 128), %r1

1:
        std     %r0, 0(%r26)
        std     %r0, 8(%r26)
        std     %r0, 16(%r26)
        std     %r0, 24(%r26)
        std     %r0, 32(%r26)
        std     %r0, 40(%r26)
        std     %r0, 48(%r26)
        std     %r0, 56(%r26)
        std     %r0, 64(%r26)
        std     %r0, 72(%r26)
        std     %r0, 80(%r26)
        std     %r0, 88(%r26)
        std     %r0, 96(%r26)
        std     %r0, 104(%r26)
        std     %r0, 112(%r26)
        std     %r0, 120(%r26)

        /* Note reverse branch hint for addib is taken.  */
        addib,COND(>),n -1, %r1, 1b
        ldo     128(%r26), %r26

#else

        /*
         * Note that until (if) we start saving the full 64-bit register
         * values on interrupt, we can't use std on a 32 bit kernel.
         */
        ldi     (PAGE_SIZE / 64), %r1

1:
        stw     %r0, 0(%r26)
        stw     %r0, 4(%r26)
        stw     %r0, 8(%r26)
        stw     %r0, 12(%r26)
        stw     %r0, 16(%r26)
        stw     %r0, 20(%r26)
        stw     %r0, 24(%r26)
        stw     %r0, 28(%r26)
        stw     %r0, 32(%r26)
        stw     %r0, 36(%r26)
        stw     %r0, 40(%r26)
        stw     %r0, 44(%r26)
        stw     %r0, 48(%r26)
        stw     %r0, 52(%r26)
        stw     %r0, 56(%r26)
        stw     %r0, 60(%r26)

        addib,COND(>),n -1, %r1, 1b
        ldo     64(%r26), %r26
#endif
        bv      %r0(%r2)
        nop
        .exit
        .procend
ENDPROC(clear_page_asm)
/* Copy page using kernel mapping.  */

ENTRY(copy_page_asm)
        .proc
        .callinfo NO_CALLS
        .entry

#ifdef CONFIG_64BIT
        /* PA8x00 CPUs can consume 2 loads or 1 store per cycle.
         * Unroll the loop by hand and arrange insn appropriately.
         * Prefetch doesn't improve performance on rp3440.
         * GCC probably can do this just as well...
         */
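        /* Note the interleave: each pair of std's writes data that was
         * loaded two groups earlier, which hides the load-use latency. */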
        ldi     (PAGE_SIZE / 128), %r1

1:      ldd     0(%r25), %r19
        ldd     8(%r25), %r20

        ldd     16(%r25), %r21
        ldd     24(%r25), %r22
        std     %r19, 0(%r26)
        std     %r20, 8(%r26)

        ldd     32(%r25), %r19
        ldd     40(%r25), %r20
        std     %r21, 16(%r26)
        std     %r22, 24(%r26)

        ldd     48(%r25), %r21
        ldd     56(%r25), %r22
        std     %r19, 32(%r26)
        std     %r20, 40(%r26)

        ldd     64(%r25), %r19
        ldd     72(%r25), %r20
        std     %r21, 48(%r26)
        std     %r22, 56(%r26)

        ldd     80(%r25), %r21
        ldd     88(%r25), %r22
        std     %r19, 64(%r26)
        std     %r20, 72(%r26)

        ldd     96(%r25), %r19
        ldd     104(%r25), %r20
        std     %r21, 80(%r26)
        std     %r22, 88(%r26)

        ldd     112(%r25), %r21
        ldd     120(%r25), %r22
        ldo     128(%r25), %r25
        std     %r19, 96(%r26)
        std     %r20, 104(%r26)

        std     %r21, 112(%r26)
        std     %r22, 120(%r26)

        /* Note reverse branch hint for addib is taken.  */
        addib,COND(>),n -1, %r1, 1b
        ldo     128(%r26), %r26

#else

        /*
         * This loop is optimized for PCXL/PCXL2 ldw/ldw and stw/stw
         * bundles (very restricted rules for bundling).
         * Note that until (if) we start saving
         * the full 64-bit register values on interrupt, we can't
         * use ldd/std on a 32 bit kernel.
         */
        ldw     0(%r25), %r19
        ldi     (PAGE_SIZE / 64), %r1

1:
        ldw     4(%r25), %r20
        ldw     8(%r25), %r21
        ldw     12(%r25), %r22
        stw     %r19, 0(%r26)
        stw     %r20, 4(%r26)
        stw     %r21, 8(%r26)
        stw     %r22, 12(%r26)
        ldw     16(%r25), %r19
        ldw     20(%r25), %r20
        ldw     24(%r25), %r21
        ldw     28(%r25), %r22
        stw     %r19, 16(%r26)
        stw     %r20, 20(%r26)
        stw     %r21, 24(%r26)
        stw     %r22, 28(%r26)
        ldw     32(%r25), %r19
        ldw     36(%r25), %r20
        ldw     40(%r25), %r21
        ldw     44(%r25), %r22
        stw     %r19, 32(%r26)
        stw     %r20, 36(%r26)
        stw     %r21, 40(%r26)
        stw     %r22, 44(%r26)
        ldw     48(%r25), %r19
        ldw     52(%r25), %r20
        ldw     56(%r25), %r21
        ldw     60(%r25), %r22
        stw     %r19, 48(%r26)
        stw     %r20, 52(%r26)
        ldo     64(%r25), %r25
        stw     %r21, 56(%r26)
        stw     %r22, 60(%r26)
        ldo     64(%r26), %r26
        addib,COND(>),n -1, %r1, 1b
        ldw     0(%r25), %r19
#endif
        bv      %r0(%r2)
        nop
        .exit
        .procend
ENDPROC(copy_page_asm)
/*
 * NOTE: Code in clear_user_page has a hard coded dependency on the
 * maximum alias boundary being 4 MB.  We've been assured by the
 * parisc chip designers that there will not ever be a parisc
 * chip with a larger alias boundary (Never say never :-) ).
 *
 * Subtle: the dtlb miss handlers support the temp alias region by
 * "knowing" that if a dtlb miss happens within the temp alias
 * region it must have occurred while in clear_user_page.  Since
 * this routine makes use of processor local translations, we
 * don't want to insert them into the kernel page table.  Instead,
 * we load up some general registers (they need to be registers
 * which aren't shadowed) with the physical page numbers (preshifted
 * for tlb insertion) needed to insert the translations.  When we
 * miss on the translation, the dtlb miss handler inserts the
 * translation into the tlb using these values:
 *
 *      %r26 physical page (shifted for tlb insert) of "to" translation
 *      %r23 physical page (shifted for tlb insert) of "from" translation
 */
/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
#define PAGE_ADD_SHIFT (PAGE_SHIFT-12)

        .macro  convert_phys_for_tlb_insert20 phys
        extrd,u \phys, 56-PAGE_ADD_SHIFT, 32-PAGE_ADD_SHIFT, \phys
#if _PAGE_SIZE_ENCODING_DEFAULT
        depdi   _PAGE_SIZE_ENCODING_DEFAULT, 63, (63-58), \phys
#endif
        .endm
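/*
 * With 4 kB pages (PAGE_SHIFT == 12, so PAGE_ADD_SHIFT == 0), the
 * extrd,u above computes (phys >> 7) & 0xffffffff, i.e. the physical
 * page number shifted left by 5, leaving the low five bits clear for
 * the page size encoding that depdi deposits into bits 59..63.
 */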
/*
 * copy_user_page_asm() performs a page copy using mappings
 * equivalent to the user page mappings.  It can be used to
 * implement copy_user_page() but unfortunately both the `from'
 * and `to' pages need to be flushed through mappings equivalent
 * to the user mappings after the copy because the kernel accesses
 * the `from' page through the kmap kernel mapping and the `to'
 * page needs to be flushed since code can be copied.  As a
 * result, this implementation is less efficient than the simpler
 * copy using the kernel mapping, which only needs the `from' page
 * to be flushed via the user mapping.  The kunmap routines handle
 * the flushes needed for the kernel mapping.
 *
 * I'm still keeping this around because it may be possible to
 * use it if more information is passed into copy_user_page().
 * Have to do some measurements to see if it is worthwhile to
 * lobby for such a change.
 */
ENTRY(copy_user_page_asm)
        .proc
        .callinfo NO_CALLS
        .entry

        /* Convert virtual `to' and `from' addresses to physical addresses.
           Move `from' physical address to non-shadowed register.  */
        ldil    L%(__PAGE_OFFSET), %r1
        sub     %r26, %r1, %r26
        sub     %r25, %r1, %r23

        ldil    L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
        depdi   0, 31,32, %r28          /* clear any sign extension */
#endif
        convert_phys_for_tlb_insert20 %r26      /* convert phys addr to tlb insert format */
        convert_phys_for_tlb_insert20 %r23      /* convert phys addr to tlb insert format */
        depd    %r24, 63,22, %r28       /* Form aliased virtual address 'to' */
        depdi   0, 63,PAGE_SHIFT, %r28  /* Clear any offset bits */
        copy    %r28, %r29
        depdi   1, 41,1, %r29           /* Form aliased virtual address 'from' */
#else
        extrw,u %r26, 24,25, %r26       /* convert phys addr to tlb insert format */
        extrw,u %r23, 24,25, %r23       /* convert phys addr to tlb insert format */
        depw    %r24, 31,22, %r28       /* Form aliased virtual address 'to' */
        depwi   0, 31,PAGE_SHIFT, %r28  /* Clear any offset bits */
        copy    %r28, %r29
        depwi   1, 9,1, %r29            /* Form aliased virtual address 'from' */
#endif

        /* Purge any old translations */

#ifdef CONFIG_PA20
        pdtlb,l 0(%r28)
        pdtlb,l 0(%r29)
#else
        tlb_lock %r20,%r21,%r22
        pdtlb   0(%r28)
        pdtlb   0(%r29)
        tlb_unlock %r20,%r21,%r22
#endif
#ifdef CONFIG_64BIT
        /* PA8x00 CPUs can consume 2 loads or 1 store per cycle.
         * Unroll the loop by hand and arrange insn appropriately.
         * GCC probably can do this just as well.
         */
        ldd     0(%r29), %r19
        ldi     (PAGE_SIZE / 128), %r1

1:      ldd     8(%r29), %r20
        ldd     16(%r29), %r21
        ldd     24(%r29), %r22
        std     %r19, 0(%r28)
        std     %r20, 8(%r28)

        ldd     32(%r29), %r19
        ldd     40(%r29), %r20
        std     %r21, 16(%r28)
        std     %r22, 24(%r28)

        ldd     48(%r29), %r21
        ldd     56(%r29), %r22
        std     %r19, 32(%r28)
        std     %r20, 40(%r28)

        ldd     64(%r29), %r19
        ldd     72(%r29), %r20
        std     %r21, 48(%r28)
        std     %r22, 56(%r28)

        ldd     80(%r29), %r21
        ldd     88(%r29), %r22
        std     %r19, 64(%r28)
        std     %r20, 72(%r28)

        ldd     96(%r29), %r19
        ldd     104(%r29), %r20
        std     %r21, 80(%r28)
        std     %r22, 88(%r28)

        ldd     112(%r29), %r21
        ldd     120(%r29), %r22
        std     %r19, 96(%r28)
        std     %r20, 104(%r28)
        ldo     128(%r29), %r29

        std     %r21, 112(%r28)
        std     %r22, 120(%r28)
        ldo     128(%r28), %r28

        /* conditional branches nullify on forward taken branch, and on
         * non-taken backward branch.  Note that .+4 is a backwards branch.
         * The ldd should only get executed if the branch is taken.
         */
        addib,COND(>),n -1, %r1, 1b     /* bundle 10 */
        ldd     0(%r29), %r19           /* start next loads */

#else
        ldi     (PAGE_SIZE / 64), %r1

        /*
         * This loop is optimized for PCXL/PCXL2 ldw/ldw and stw/stw
         * bundles (very restricted rules for bundling).  It probably
         * does OK on PCXU and better, but we could do better with
         * ldd/std instructions.  Note that until (if) we start saving
         * the full 64-bit register values on interrupt, we can't
         * use ldd/std on a 32 bit kernel.
         */
1:      ldw     0(%r29), %r19
        ldw     4(%r29), %r20
        ldw     8(%r29), %r21
        ldw     12(%r29), %r22
        stw     %r19, 0(%r28)
        stw     %r20, 4(%r28)
        stw     %r21, 8(%r28)
        stw     %r22, 12(%r28)
        ldw     16(%r29), %r19
        ldw     20(%r29), %r20
        ldw     24(%r29), %r21
        ldw     28(%r29), %r22
        stw     %r19, 16(%r28)
        stw     %r20, 20(%r28)
        stw     %r21, 24(%r28)
        stw     %r22, 28(%r28)
        ldw     32(%r29), %r19
        ldw     36(%r29), %r20
        ldw     40(%r29), %r21
        ldw     44(%r29), %r22
        stw     %r19, 32(%r28)
        stw     %r20, 36(%r28)
        stw     %r21, 40(%r28)
        stw     %r22, 44(%r28)
        ldw     48(%r29), %r19
        ldw     52(%r29), %r20
        ldw     56(%r29), %r21
        ldw     60(%r29), %r22
        stw     %r19, 48(%r28)
        stw     %r20, 52(%r28)
        stw     %r21, 56(%r28)
        stw     %r22, 60(%r28)
        ldo     64(%r28), %r28

        addib,COND(>)   -1, %r1, 1b
        ldo     64(%r29), %r29
#endif

        bv      %r0(%r2)
        nop
        .exit
        .procend
ENDPROC(copy_user_page_asm)
ENTRY(clear_user_page_asm)
        .proc
        .callinfo NO_CALLS
        .entry

        tophys_r1 %r26

        ldil    L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
        depdi   0, 31,32, %r28          /* clear any sign extension */
#endif
        convert_phys_for_tlb_insert20 %r26      /* convert phys addr to tlb insert format */
        depd    %r25, 63,22, %r28       /* Form aliased virtual address 'to' */
        depdi   0, 63,PAGE_SHIFT, %r28  /* Clear any offset bits */
#else
        extrw,u %r26, 24,25, %r26       /* convert phys addr to tlb insert format */
        depw    %r25, 31,22, %r28       /* Form aliased virtual address 'to' */
        depwi   0, 31,PAGE_SHIFT, %r28  /* Clear any offset bits */
#endif

        /* Purge any old translation */

#ifdef CONFIG_PA20
        pdtlb,l 0(%r28)
#else
        tlb_lock %r20,%r21,%r22
        pdtlb   0(%r28)
        tlb_unlock %r20,%r21,%r22
#endif

#ifdef CONFIG_64BIT
        ldi     (PAGE_SIZE / 128), %r1

        /* PREFETCH (Write) has not (yet) been proven to help here */
        /* #define PREFETCHW_OP ldd 256(%0), %r0 */

1:      std     %r0, 0(%r28)
        std     %r0, 8(%r28)
        std     %r0, 16(%r28)
        std     %r0, 24(%r28)
        std     %r0, 32(%r28)
        std     %r0, 40(%r28)
        std     %r0, 48(%r28)
        std     %r0, 56(%r28)
        std     %r0, 64(%r28)
        std     %r0, 72(%r28)
        std     %r0, 80(%r28)
        std     %r0, 88(%r28)
        std     %r0, 96(%r28)
        std     %r0, 104(%r28)
        std     %r0, 112(%r28)
        std     %r0, 120(%r28)

        addib,COND(>)   -1, %r1, 1b
        ldo     128(%r28), %r28

#else   /* ! CONFIG_64BIT */
        ldi     (PAGE_SIZE / 64), %r1

1:      stw     %r0, 0(%r28)
        stw     %r0, 4(%r28)
        stw     %r0, 8(%r28)
        stw     %r0, 12(%r28)
        stw     %r0, 16(%r28)
        stw     %r0, 20(%r28)
        stw     %r0, 24(%r28)
        stw     %r0, 28(%r28)
        stw     %r0, 32(%r28)
        stw     %r0, 36(%r28)
        stw     %r0, 40(%r28)
        stw     %r0, 44(%r28)
        stw     %r0, 48(%r28)
        stw     %r0, 52(%r28)
        stw     %r0, 56(%r28)
        stw     %r0, 60(%r28)

        addib,COND(>)   -1, %r1, 1b
        ldo     64(%r28), %r28
#endif  /* CONFIG_64BIT */

        bv      %r0(%r2)
        nop
        .exit
        .procend
ENDPROC(clear_user_page_asm)
ENTRY(flush_dcache_page_asm)
        .proc
        .callinfo NO_CALLS
        .entry

        ldil    L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
        depdi   0, 31,32, %r28          /* clear any sign extension */
#endif
        convert_phys_for_tlb_insert20 %r26      /* convert phys addr to tlb insert format */
        depd    %r25, 63,22, %r28       /* Form aliased virtual address 'to' */
        depdi   0, 63,PAGE_SHIFT, %r28  /* Clear any offset bits */
#else
        extrw,u %r26, 24,25, %r26       /* convert phys addr to tlb insert format */
        depw    %r25, 31,22, %r28       /* Form aliased virtual address 'to' */
        depwi   0, 31,PAGE_SHIFT, %r28  /* Clear any offset bits */
#endif

        /* Purge any old translation */

#ifdef CONFIG_PA20
        pdtlb,l 0(%r28)
#else
        tlb_lock %r20,%r21,%r22
        pdtlb   0(%r28)
        tlb_unlock %r20,%r21,%r22
#endif
        ldil    L%dcache_stride, %r1
        ldw     R%dcache_stride(%r1), %r31

#ifdef CONFIG_64BIT
        depdi,z 1, 63-PAGE_SHIFT,1, %r25
#else
        depwi,z 1, 31-PAGE_SHIFT,1, %r25
#endif
        add     %r28, %r25, %r25
        sub     %r25, %r31, %r25
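        /* %r25 now holds the address of the last dcache line in the
         * page (base + PAGE_SIZE - stride); the loop below issues 16
         * fdc's per pass until %r28 catches up with it. */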
1:      fdc,m   %r31(%r28)
        fdc,m   %r31(%r28)
        fdc,m   %r31(%r28)
        fdc,m   %r31(%r28)
        fdc,m   %r31(%r28)
        fdc,m   %r31(%r28)
        fdc,m   %r31(%r28)
        fdc,m   %r31(%r28)
        fdc,m   %r31(%r28)
        fdc,m   %r31(%r28)
        fdc,m   %r31(%r28)
        fdc,m   %r31(%r28)
        fdc,m   %r31(%r28)
        fdc,m   %r31(%r28)
        fdc,m   %r31(%r28)
        cmpb,COND(<<) %r28, %r25, 1b
        fdc,m   %r31(%r28)

        sync

#ifdef CONFIG_PA20
        pdtlb,l 0(%r25)
#else
        tlb_lock %r20,%r21,%r22
        pdtlb   0(%r25)
        tlb_unlock %r20,%r21,%r22
#endif

        bv      %r0(%r2)
        nop
        .exit
        .procend
ENDPROC(flush_dcache_page_asm)
ENTRY(flush_icache_page_asm)
        .proc
        .callinfo NO_CALLS
        .entry

        ldil    L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
        depdi   0, 31,32, %r28          /* clear any sign extension */
#endif
        convert_phys_for_tlb_insert20 %r26      /* convert phys addr to tlb insert format */
        depd    %r25, 63,22, %r28       /* Form aliased virtual address 'to' */
        depdi   0, 63,PAGE_SHIFT, %r28  /* Clear any offset bits */
#else
        extrw,u %r26, 24,25, %r26       /* convert phys addr to tlb insert format */
        depw    %r25, 31,22, %r28       /* Form aliased virtual address 'to' */
        depwi   0, 31,PAGE_SHIFT, %r28  /* Clear any offset bits */
#endif

        /* Purge any old translation */

#ifdef CONFIG_PA20
        pitlb,l %r0(%sr4,%r28)
#else
        tlb_lock %r20,%r21,%r22
        pitlb   (%sr4,%r28)
        tlb_unlock %r20,%r21,%r22
#endif

        ldil    L%icache_stride, %r1
        ldw     R%icache_stride(%r1), %r31

#ifdef CONFIG_64BIT
        depdi,z 1, 63-PAGE_SHIFT,1, %r25
#else
        depwi,z 1, 31-PAGE_SHIFT,1, %r25
#endif
        add     %r28, %r25, %r25
        sub     %r25, %r31, %r25

        /* fic only has the type 26 form on PA1.1, requiring an
         * explicit space specification, so use %sr4 */
1:      fic,m   %r31(%sr4,%r28)
        fic,m   %r31(%sr4,%r28)
        fic,m   %r31(%sr4,%r28)
        fic,m   %r31(%sr4,%r28)
        fic,m   %r31(%sr4,%r28)
        fic,m   %r31(%sr4,%r28)
        fic,m   %r31(%sr4,%r28)
        fic,m   %r31(%sr4,%r28)
        fic,m   %r31(%sr4,%r28)
        fic,m   %r31(%sr4,%r28)
        fic,m   %r31(%sr4,%r28)
        fic,m   %r31(%sr4,%r28)
        fic,m   %r31(%sr4,%r28)
        fic,m   %r31(%sr4,%r28)
        fic,m   %r31(%sr4,%r28)
        cmpb,COND(<<) %r28, %r25, 1b
        fic,m   %r31(%sr4,%r28)

        sync

#ifdef CONFIG_PA20
        pitlb,l %r0(%sr4,%r25)
#else
        tlb_lock %r20,%r21,%r22
        pitlb   (%sr4,%r25)
        tlb_unlock %r20,%r21,%r22
#endif

        bv      %r0(%r2)
        nop
        .exit
        .procend
ENDPROC(flush_icache_page_asm)
ENTRY(flush_kernel_dcache_page_asm)
        .proc
        .callinfo NO_CALLS
        .entry

        ldil    L%dcache_stride, %r1
        ldw     R%dcache_stride(%r1), %r23

#ifdef CONFIG_64BIT
        depdi,z 1, 63-PAGE_SHIFT,1, %r25
#else
        depwi,z 1, 31-PAGE_SHIFT,1, %r25
#endif
        add     %r26, %r25, %r25
        sub     %r25, %r23, %r25

1:      fdc,m   %r23(%r26)
        fdc,m   %r23(%r26)
        fdc,m   %r23(%r26)
        fdc,m   %r23(%r26)
        fdc,m   %r23(%r26)
        fdc,m   %r23(%r26)
        fdc,m   %r23(%r26)
        fdc,m   %r23(%r26)
        fdc,m   %r23(%r26)
        fdc,m   %r23(%r26)
        fdc,m   %r23(%r26)
        fdc,m   %r23(%r26)
        fdc,m   %r23(%r26)
        fdc,m   %r23(%r26)
        fdc,m   %r23(%r26)
        cmpb,COND(<<) %r26, %r25, 1b
        fdc,m   %r23(%r26)

        sync
        bv      %r0(%r2)
        nop
        .exit
        .procend
ENDPROC(flush_kernel_dcache_page_asm)
ENTRY(purge_kernel_dcache_page_asm)
        .proc
        .callinfo NO_CALLS
        .entry

        ldil    L%dcache_stride, %r1
        ldw     R%dcache_stride(%r1), %r23

#ifdef CONFIG_64BIT
        depdi,z 1, 63-PAGE_SHIFT,1, %r25
#else
        depwi,z 1, 31-PAGE_SHIFT,1, %r25
#endif
        add     %r26, %r25, %r25
        sub     %r25, %r23, %r25

1:      pdc,m   %r23(%r26)
        pdc,m   %r23(%r26)
        pdc,m   %r23(%r26)
        pdc,m   %r23(%r26)
        pdc,m   %r23(%r26)
        pdc,m   %r23(%r26)
        pdc,m   %r23(%r26)
        pdc,m   %r23(%r26)
        pdc,m   %r23(%r26)
        pdc,m   %r23(%r26)
        pdc,m   %r23(%r26)
        pdc,m   %r23(%r26)
        pdc,m   %r23(%r26)
        pdc,m   %r23(%r26)
        pdc,m   %r23(%r26)
        cmpb,COND(<<) %r26, %r25, 1b
        pdc,m   %r23(%r26)

        sync
        bv      %r0(%r2)
        nop
        .exit
        .procend
ENDPROC(purge_kernel_dcache_page_asm)
ENTRY(flush_user_dcache_range_asm)
        .proc
        .callinfo NO_CALLS
        .entry

        ldil    L%dcache_stride, %r1
        ldw     R%dcache_stride(%r1), %r23
        ldo     -1(%r23), %r21
        ANDCM   %r26, %r21, %r26
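        /* dcache_stride is a power of two on all known implementations,
         * so ANDCM with (stride - 1) rounds the start address down to a
         * cache line boundary. */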
1:      cmpb,COND(<<),n %r26, %r25, 1b
        fdc,m   %r23(%sr3, %r26)

        sync
        bv      %r0(%r2)
        nop
        .exit
        .procend
ENDPROC(flush_user_dcache_range_asm)
ENTRY(flush_kernel_dcache_range_asm)
        .proc
        .callinfo NO_CALLS
        .entry

        ldil    L%dcache_stride, %r1
        ldw     R%dcache_stride(%r1), %r23
        ldo     -1(%r23), %r21
        ANDCM   %r26, %r21, %r26

1:      cmpb,COND(<<),n %r26, %r25, 1b
        fdc,m   %r23(%r26)

        sync
        syncdma
        bv      %r0(%r2)
        nop
        .exit
        .procend
ENDPROC(flush_kernel_dcache_range_asm)
ENTRY(flush_user_icache_range_asm)
        .proc
        .callinfo NO_CALLS
        .entry

        ldil    L%icache_stride, %r1
        ldw     R%icache_stride(%r1), %r23
        ldo     -1(%r23), %r21
        ANDCM   %r26, %r21, %r26

1:      cmpb,COND(<<),n %r26, %r25, 1b
        fic,m   %r23(%sr3, %r26)

        sync
        bv      %r0(%r2)
        nop
        .exit
        .procend
ENDPROC(flush_user_icache_range_asm)
ENTRY(flush_kernel_icache_page)
        .proc
        .callinfo NO_CALLS
        .entry

        ldil    L%icache_stride, %r1
        ldw     R%icache_stride(%r1), %r23

#ifdef CONFIG_64BIT
        depdi,z 1, 63-PAGE_SHIFT,1, %r25
#else
        depwi,z 1, 31-PAGE_SHIFT,1, %r25
#endif
        add     %r26, %r25, %r25
        sub     %r25, %r23, %r25

1:      fic,m   %r23(%sr4, %r26)
        fic,m   %r23(%sr4, %r26)
        fic,m   %r23(%sr4, %r26)
        fic,m   %r23(%sr4, %r26)
        fic,m   %r23(%sr4, %r26)
        fic,m   %r23(%sr4, %r26)
        fic,m   %r23(%sr4, %r26)
        fic,m   %r23(%sr4, %r26)
        fic,m   %r23(%sr4, %r26)
        fic,m   %r23(%sr4, %r26)
        fic,m   %r23(%sr4, %r26)
        fic,m   %r23(%sr4, %r26)
        fic,m   %r23(%sr4, %r26)
        fic,m   %r23(%sr4, %r26)
        fic,m   %r23(%sr4, %r26)
        cmpb,COND(<<) %r26, %r25, 1b
        fic,m   %r23(%sr4, %r26)

        sync
        bv      %r0(%r2)
        nop
        .exit
        .procend
ENDPROC(flush_kernel_icache_page)
ENTRY(flush_kernel_icache_range_asm)
        .proc
        .callinfo NO_CALLS
        .entry

        ldil    L%icache_stride, %r1
        ldw     R%icache_stride(%r1), %r23
        ldo     -1(%r23), %r21
        ANDCM   %r26, %r21, %r26

1:      cmpb,COND(<<),n %r26, %r25, 1b
        fic,m   %r23(%sr4, %r26)

        sync
        bv      %r0(%r2)
        nop
        .exit
        .procend
ENDPROC(flush_kernel_icache_range_asm)
/* align should cover use of rfi in disable_sr_hashing_asm and
 * srdis_done.
 */
        .align  256

ENTRY(disable_sr_hashing_asm)
        .proc
        .callinfo NO_CALLS
        .entry

        /*
         * Switch to real mode
         */
        /* pcxt_ssm_bug */
        rsm     PSW_SM_I, %r0
        load32  PA(1f), %r1
        nop
        nop
        nop
        nop
        nop

        rsm     PSW_SM_Q, %r0           /* prep to load iia queue */
        mtctl   %r0, %cr17              /* Clear IIASQ tail */
        mtctl   %r0, %cr17              /* Clear IIASQ head */
        mtctl   %r1, %cr18              /* IIAOQ head */
        ldo     4(%r1), %r1
        mtctl   %r1, %cr18              /* IIAOQ tail */
        load32  REAL_MODE_PSW, %r1
        mtctl   %r1, %ipsw
        rfi
        nop
1:      cmpib,=,n SRHASH_PCXST, %r26, srdis_pcxs
        cmpib,=,n SRHASH_PCXL, %r26, srdis_pcxl
        cmpib,=,n SRHASH_PA20, %r26, srdis_pa20
        b,n     srdis_done
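/* The .word values below are hand-encoded mfdiag/mtdiag instructions,
 * presumably because the assembler lacks mnemonics for the diagnose
 * registers; per the comments, some must be issued twice to take effect. */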
srdis_pcxs:

        /* Disable Space Register Hashing for PCXS,PCXT,PCXT' */

        .word   0x141c1a00              /* mfdiag %dr0, %r28 */
        .word   0x141c1a00              /* must issue twice */
        depwi   0, 18,1, %r28           /* Clear DHE (dcache hash enable) */
        depwi   0, 20,1, %r28           /* Clear IHE (icache hash enable) */
        .word   0x141c1600              /* mtdiag %r28, %dr0 */
        .word   0x141c1600              /* must issue twice */
        b,n     srdis_done

srdis_pcxl:

        /* Disable Space Register Hashing for PCXL */

        .word   0x141c0600              /* mfdiag %dr0, %r28 */
        depwi   0, 28,2, %r28           /* Clear DHASH_EN & IHASH_EN */
        .word   0x141c0240              /* mtdiag %r28, %dr0 */
        b,n     srdis_done

srdis_pa20:

        /* Disable Space Register Hashing for PCXU,PCXU+,PCXW,PCXW+,PCXW2 */

        .word   0x144008bc              /* mfdiag %dr2, %r28 */
        depdi   0, 54,1, %r28           /* clear DIAG_SPHASH_ENAB (bit 54) */
        .word   0x145c1840              /* mtdiag %r28, %dr2 */

srdis_done:
        /* Switch back to virtual mode */
        rsm     PSW_SM_I, %r0           /* prep to load iia queue */
        load32  2f, %r1
        nop
        nop
        nop
        nop
        nop

        rsm     PSW_SM_Q, %r0           /* prep to load iia queue */
        mtctl   %r0, %cr17              /* Clear IIASQ tail */
        mtctl   %r0, %cr17              /* Clear IIASQ head */
        mtctl   %r1, %cr18              /* IIAOQ head */
        ldo     4(%r1), %r1
        mtctl   %r1, %cr18              /* IIAOQ tail */
        load32  KERNEL_PSW, %r1
        mtctl   %r1, %ipsw
        rfi
        nop

2:      bv      %r0(%r2)
        nop
        .exit
        .procend
ENDPROC(disable_sr_hashing_asm)

        .end