pacache.S
/*
 * PARISC TLB and cache flushing support
 * Copyright (C) 2000-2001 Hewlett-Packard (John Marvin)
 * Copyright (C) 2001 Matthew Wilcox (willy at parisc-linux.org)
 * Copyright (C) 2002 Richard Hirst (rhirst with parisc-linux.org)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * NOTE: fdc, fic, and pdc instructions that use base register modification
 *	 should only use index and base registers that are not shadowed,
 *	 so that the fast path emulation in the non-access miss handler
 *	 can be used.
 */
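/*
 * (Assumed background for the NOTE above, per the PA-RISC 2.0
 * architecture: the general registers shadowed across interruptions
 * are %r1, %r8, %r9, %r16, %r17, %r24 and %r25, which is why the
 * flush loops below keep their base and index values elsewhere.)
 */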
#ifdef CONFIG_64BIT
	.level	2.0w
#else
	.level	2.0
#endif

#include <asm/psw.h>
#include <asm/assembly.h>
#include <asm/pgtable.h>
#include <asm/cache.h>
#include <asm/ldcw.h>
#include <linux/linkage.h>
#include <linux/init.h>

	.section .text.hot
	.align	16
ENTRY_CFI(flush_tlb_all_local)
	.proc
	.callinfo NO_CALLS
	.entry

	/*
	 * The pitlbe and pdtlbe instructions should only be used to
	 * flush the entire tlb.  Also, there must be no intervening
	 * tlb operations, e.g. tlb misses, so the operation needs
	 * to happen in real mode with all interruptions disabled.
	 */

	/* pcxt_ssm_bug - relied upon translation! PA 2.0 Arch. F-4 and F-5 */
	rsm	PSW_SM_I, %r19		/* save I-bit state */
	load32	PA(1f), %r1
	nop
	nop
	nop
	nop
	nop

	rsm	PSW_SM_Q, %r0		/* prep to load iia queue */
	mtctl	%r0, %cr17		/* Clear IIASQ tail */
	mtctl	%r0, %cr17		/* Clear IIASQ head */
	mtctl	%r1, %cr18		/* IIAOQ head */
	ldo	4(%r1), %r1
	mtctl	%r1, %cr18		/* IIAOQ tail */
	load32	REAL_MODE_PSW, %r1
	mtctl	%r1, %ipsw
	rfi
	nop

1:	load32	PA(cache_info), %r1

	/* Flush Instruction Tlb */

	LDREG	ITLB_SID_BASE(%r1), %r20
	LDREG	ITLB_SID_STRIDE(%r1), %r21
	LDREG	ITLB_SID_COUNT(%r1), %r22
	LDREG	ITLB_OFF_BASE(%r1), %arg0
	LDREG	ITLB_OFF_STRIDE(%r1), %arg1
	LDREG	ITLB_OFF_COUNT(%r1), %arg2
	LDREG	ITLB_LOOP(%r1), %arg3

	addib,COND(=)	-1, %arg3, fitoneloop	/* Preadjust and test */
	movb,<,n	%arg3, %r31, fitdone	/* If loop < 0, skip */
	copy	%arg0, %r28			/* Init base addr */
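	/*
	 * (Shape of the purge loops below, as read from the code: the
	 * outer loop walks the space IDs, the middle loop walks the page
	 * offsets within one space, and the inner loop repeats the purge
	 * ITLB_LOOP times per offset.  The LOOP == 1 and LOOP < 0 cases
	 * were peeled off by the two branches just above.)
	 */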
fitmanyloop:					/* Loop if LOOP >= 2 */
	mtsp	%r20, %sr1
	add	%r21, %r20, %r20		/* increment space */
	copy	%arg2, %r29			/* Init middle loop count */

fitmanymiddle:					/* Loop if LOOP >= 2 */
	addib,COND(>)	-1, %r31, fitmanymiddle	/* Adjusted inner loop decr */
	pitlbe	%r0(%sr1, %r28)
	pitlbe,m	%arg1(%sr1, %r28)	/* Last pitlbe and addr adjust */
	addib,COND(>)	-1, %r29, fitmanymiddle	/* Middle loop decr */
	copy	%arg3, %r31			/* Re-init inner loop count */

	movb,tr	%arg0, %r28, fitmanyloop	/* Re-init base addr */
	addib,COND(<=),n	-1, %r22, fitdone	/* Outer loop count decr */

fitoneloop:					/* Loop if LOOP = 1 */
	mtsp	%r20, %sr1
	copy	%arg0, %r28			/* init base addr */
	copy	%arg2, %r29			/* init middle loop count */

fitonemiddle:					/* Loop if LOOP = 1 */
	addib,COND(>)	-1, %r29, fitonemiddle	/* Middle loop count decr */
	pitlbe,m	%arg1(%sr1, %r28)	/* pitlbe for one loop */

	addib,COND(>)	-1, %r22, fitoneloop	/* Outer loop count decr */
	add	%r21, %r20, %r20		/* increment space */

fitdone:

	/* Flush Data Tlb */

	LDREG	DTLB_SID_BASE(%r1), %r20
	LDREG	DTLB_SID_STRIDE(%r1), %r21
	LDREG	DTLB_SID_COUNT(%r1), %r22
	LDREG	DTLB_OFF_BASE(%r1), %arg0
	LDREG	DTLB_OFF_STRIDE(%r1), %arg1
	LDREG	DTLB_OFF_COUNT(%r1), %arg2
	LDREG	DTLB_LOOP(%r1), %arg3

	addib,COND(=)	-1, %arg3, fdtoneloop	/* Preadjust and test */
	movb,<,n	%arg3, %r31, fdtdone	/* If loop < 0, skip */
	copy	%arg0, %r28			/* Init base addr */

fdtmanyloop:					/* Loop if LOOP >= 2 */
	mtsp	%r20, %sr1
	add	%r21, %r20, %r20		/* increment space */
	copy	%arg2, %r29			/* Init middle loop count */

fdtmanymiddle:					/* Loop if LOOP >= 2 */
	addib,COND(>)	-1, %r31, fdtmanymiddle	/* Adjusted inner loop decr */
	pdtlbe	%r0(%sr1, %r28)
	pdtlbe,m	%arg1(%sr1, %r28)	/* Last pdtlbe and addr adjust */
	addib,COND(>)	-1, %r29, fdtmanymiddle	/* Middle loop decr */
	copy	%arg3, %r31			/* Re-init inner loop count */

	movb,tr	%arg0, %r28, fdtmanyloop	/* Re-init base addr */
	addib,COND(<=),n	-1, %r22, fdtdone	/* Outer loop count decr */

fdtoneloop:					/* Loop if LOOP = 1 */
	mtsp	%r20, %sr1
	copy	%arg0, %r28			/* init base addr */
	copy	%arg2, %r29			/* init middle loop count */

fdtonemiddle:					/* Loop if LOOP = 1 */
	addib,COND(>)	-1, %r29, fdtonemiddle	/* Middle loop count decr */
	pdtlbe,m	%arg1(%sr1, %r28)	/* pdtlbe for one loop */

	addib,COND(>)	-1, %r22, fdtoneloop	/* Outer loop count decr */
	add	%r21, %r20, %r20		/* increment space */

fdtdone:
	/*
	 * Switch back to virtual mode
	 */
	/* pcxt_ssm_bug */
	rsm	PSW_SM_I, %r0
	load32	2f, %r1
	nop
	nop
	nop
	nop
	nop

	rsm	PSW_SM_Q, %r0		/* prep to load iia queue */
	mtctl	%r0, %cr17		/* Clear IIASQ tail */
	mtctl	%r0, %cr17		/* Clear IIASQ head */
	mtctl	%r1, %cr18		/* IIAOQ head */
	ldo	4(%r1), %r1
	mtctl	%r1, %cr18		/* IIAOQ tail */
	load32	KERNEL_PSW, %r1
	or	%r1, %r19, %r1		/* I-bit to state on entry */
	mtctl	%r1, %ipsw		/* restore I-bit (entire PSW) */
	rfi
	nop

2:	bv	%r0(%r2)
	nop

	.exit
	.procend
ENDPROC_CFI(flush_tlb_all_local)
	.import cache_info, data

ENTRY_CFI(flush_instruction_cache_local)
	.proc
	.callinfo NO_CALLS
	.entry

	load32	cache_info, %r1

	/* Flush Instruction Cache */

	LDREG	ICACHE_BASE(%r1), %arg0
	LDREG	ICACHE_STRIDE(%r1), %arg1
	LDREG	ICACHE_COUNT(%r1), %arg2
	LDREG	ICACHE_LOOP(%r1), %arg3
	rsm	PSW_SM_I, %r22		/* No mmgt ops during loop */
	mtsp	%r0, %sr1
	addib,COND(=)	-1, %arg3, fioneloop	/* Preadjust and test */
	movb,<,n	%arg3, %r31, fisync	/* If loop < 0, do sync */

fimanyloop:					/* Loop if LOOP >= 2 */
	addib,COND(>)	-1, %r31, fimanyloop	/* Adjusted inner loop decr */
	fice	%r0(%sr1, %arg0)
	fice,m	%arg1(%sr1, %arg0)		/* Last fice and addr adjust */
	movb,tr	%arg3, %r31, fimanyloop		/* Re-init inner loop count */
	addib,COND(<=),n	-1, %arg2, fisync	/* Outer loop decr */

fioneloop:					/* Loop if LOOP = 1 */
	/* Some implementations may flush with a single fice instruction */
	cmpib,COND(>>=),n	15, %arg2, fioneloop2

fioneloop1:
	fice,m	%arg1(%sr1, %arg0)
	fice,m	%arg1(%sr1, %arg0)
	fice,m	%arg1(%sr1, %arg0)
	fice,m	%arg1(%sr1, %arg0)
	fice,m	%arg1(%sr1, %arg0)
	fice,m	%arg1(%sr1, %arg0)
	fice,m	%arg1(%sr1, %arg0)
	fice,m	%arg1(%sr1, %arg0)
	fice,m	%arg1(%sr1, %arg0)
	fice,m	%arg1(%sr1, %arg0)
	fice,m	%arg1(%sr1, %arg0)
	fice,m	%arg1(%sr1, %arg0)
	fice,m	%arg1(%sr1, %arg0)
	fice,m	%arg1(%sr1, %arg0)
	fice,m	%arg1(%sr1, %arg0)
	addib,COND(>)	-16, %arg2, fioneloop1
	fice,m	%arg1(%sr1, %arg0)

	/* Check if done */
	cmpb,COND(=),n	%arg2, %r0, fisync	/* Predict branch taken */

fioneloop2:
	addib,COND(>)	-1, %arg2, fioneloop2	/* Outer loop count decr */
	fice,m	%arg1(%sr1, %arg0)		/* Fice for one loop */

fisync:
	sync
	mtsm	%r22			/* restore I-bit */
	bv	%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(flush_instruction_cache_local)
	.import cache_info, data

ENTRY_CFI(flush_data_cache_local)
	.proc
	.callinfo NO_CALLS
	.entry

	load32	cache_info, %r1

	/* Flush Data Cache */

	LDREG	DCACHE_BASE(%r1), %arg0
	LDREG	DCACHE_STRIDE(%r1), %arg1
	LDREG	DCACHE_COUNT(%r1), %arg2
	LDREG	DCACHE_LOOP(%r1), %arg3
	rsm	PSW_SM_I, %r22		/* No mmgt ops during loop */
	mtsp	%r0, %sr1
	addib,COND(=)	-1, %arg3, fdoneloop	/* Preadjust and test */
	movb,<,n	%arg3, %r31, fdsync	/* If loop < 0, do sync */

fdmanyloop:					/* Loop if LOOP >= 2 */
	addib,COND(>)	-1, %r31, fdmanyloop	/* Adjusted inner loop decr */
	fdce	%r0(%sr1, %arg0)
	fdce,m	%arg1(%sr1, %arg0)		/* Last fdce and addr adjust */
	movb,tr	%arg3, %r31, fdmanyloop		/* Re-init inner loop count */
	addib,COND(<=),n	-1, %arg2, fdsync	/* Outer loop decr */

fdoneloop:					/* Loop if LOOP = 1 */
	/* Some implementations may flush with a single fdce instruction */
	cmpib,COND(>>=),n	15, %arg2, fdoneloop2

fdoneloop1:
	fdce,m	%arg1(%sr1, %arg0)
	fdce,m	%arg1(%sr1, %arg0)
	fdce,m	%arg1(%sr1, %arg0)
	fdce,m	%arg1(%sr1, %arg0)
	fdce,m	%arg1(%sr1, %arg0)
	fdce,m	%arg1(%sr1, %arg0)
	fdce,m	%arg1(%sr1, %arg0)
	fdce,m	%arg1(%sr1, %arg0)
	fdce,m	%arg1(%sr1, %arg0)
	fdce,m	%arg1(%sr1, %arg0)
	fdce,m	%arg1(%sr1, %arg0)
	fdce,m	%arg1(%sr1, %arg0)
	fdce,m	%arg1(%sr1, %arg0)
	fdce,m	%arg1(%sr1, %arg0)
	fdce,m	%arg1(%sr1, %arg0)
	addib,COND(>)	-16, %arg2, fdoneloop1
	fdce,m	%arg1(%sr1, %arg0)

	/* Check if done */
	cmpb,COND(=),n	%arg2, %r0, fdsync	/* Predict branch taken */

fdoneloop2:
	addib,COND(>)	-1, %arg2, fdoneloop2	/* Outer loop count decr */
	fdce,m	%arg1(%sr1, %arg0)		/* Fdce for one loop */

fdsync:
	syncdma
	sync
	mtsm	%r22			/* restore I-bit */
	bv	%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(flush_data_cache_local)
/* Macros to serialize TLB purge operations on SMP.  */

	.macro	tlb_lock	la,flags,tmp
#ifdef CONFIG_SMP
#if __PA_LDCW_ALIGNMENT > 4
	load32	pa_tlb_lock + __PA_LDCW_ALIGNMENT-1, \la
	depi	0,31,__PA_LDCW_ALIGN_ORDER, \la
#else
	load32	pa_tlb_lock, \la
#endif
	rsm	PSW_SM_I,\flags
1:	LDCW	0(\la),\tmp
	cmpib,<>,n	0,\tmp,3f
2:	ldw	0(\la),\tmp
	cmpb,<>	%r0,\tmp,1b
	nop
	b,n	2b
3:
#endif
	.endm

	.macro	tlb_unlock	la,flags,tmp
#ifdef CONFIG_SMP
	ldi	1,\tmp
	stw	\tmp,0(\la)
	mtsm	\flags
#endif
	.endm
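/*
 * (Sketch of the lock protocol above: ldcw atomically loads a word
 * and zeroes it in memory, so a nonzero result means the lock was
 * free and is now held; tlb_unlock releases it by storing 1 back.
 * The inner ldw loop spins read-only until the word goes nonzero
 * before retrying the ldcw, keeping the line shared while waiting.)
 */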
/* Clear page using kernel mapping.  */
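/*
 * (Assumed C-level signature, from the PA-RISC argument registers
 * used below, %r26 being arg0:  void clear_page_asm(void *page);)
 */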
ENTRY_CFI(clear_page_asm)
	.proc
	.callinfo NO_CALLS
	.entry

#ifdef CONFIG_64BIT

	/* Unroll the loop.  */
	ldi	(PAGE_SIZE / 128), %r1

1:
	std	%r0, 0(%r26)
	std	%r0, 8(%r26)
	std	%r0, 16(%r26)
	std	%r0, 24(%r26)
	std	%r0, 32(%r26)
	std	%r0, 40(%r26)
	std	%r0, 48(%r26)
	std	%r0, 56(%r26)
	std	%r0, 64(%r26)
	std	%r0, 72(%r26)
	std	%r0, 80(%r26)
	std	%r0, 88(%r26)
	std	%r0, 96(%r26)
	std	%r0, 104(%r26)
	std	%r0, 112(%r26)
	std	%r0, 120(%r26)

	/* Note reverse branch hint for addib is taken.  */
	addib,COND(>),n	-1, %r1, 1b
	ldo	128(%r26), %r26

#else

	/*
	 * Note that until (if) we start saving the full 64-bit register
	 * values on interrupt, we can't use std on a 32 bit kernel.
	 */
	ldi	(PAGE_SIZE / 64), %r1

1:
	stw	%r0, 0(%r26)
	stw	%r0, 4(%r26)
	stw	%r0, 8(%r26)
	stw	%r0, 12(%r26)
	stw	%r0, 16(%r26)
	stw	%r0, 20(%r26)
	stw	%r0, 24(%r26)
	stw	%r0, 28(%r26)
	stw	%r0, 32(%r26)
	stw	%r0, 36(%r26)
	stw	%r0, 40(%r26)
	stw	%r0, 44(%r26)
	stw	%r0, 48(%r26)
	stw	%r0, 52(%r26)
	stw	%r0, 56(%r26)
	stw	%r0, 60(%r26)

	addib,COND(>),n	-1, %r1, 1b
	ldo	64(%r26), %r26
#endif
	bv	%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(clear_page_asm)
/* Copy page using kernel mapping.  */
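/*
 * (Assumed C-level signature, from the %r26/%r25 usage below:
 * void copy_page_asm(void *to, void *from);)
 */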
ENTRY_CFI(copy_page_asm)
	.proc
	.callinfo NO_CALLS
	.entry

#ifdef CONFIG_64BIT
	/* PA8x00 CPUs can consume 2 loads or 1 store per cycle.
	 * Unroll the loop by hand and arrange insn appropriately.
	 * Prefetch doesn't improve performance on rp3440.
	 * GCC probably can do this just as well...
	 */
	ldi	(PAGE_SIZE / 128), %r1

1:	ldd	0(%r25), %r19
	ldd	8(%r25), %r20

	ldd	16(%r25), %r21
	ldd	24(%r25), %r22
	std	%r19, 0(%r26)
	std	%r20, 8(%r26)

	ldd	32(%r25), %r19
	ldd	40(%r25), %r20
	std	%r21, 16(%r26)
	std	%r22, 24(%r26)

	ldd	48(%r25), %r21
	ldd	56(%r25), %r22
	std	%r19, 32(%r26)
	std	%r20, 40(%r26)

	ldd	64(%r25), %r19
	ldd	72(%r25), %r20
	std	%r21, 48(%r26)
	std	%r22, 56(%r26)

	ldd	80(%r25), %r21
	ldd	88(%r25), %r22
	std	%r19, 64(%r26)
	std	%r20, 72(%r26)

	ldd	96(%r25), %r19
	ldd	104(%r25), %r20
	std	%r21, 80(%r26)
	std	%r22, 88(%r26)

	ldd	112(%r25), %r21
	ldd	120(%r25), %r22
	ldo	128(%r25), %r25
	std	%r19, 96(%r26)
	std	%r20, 104(%r26)

	std	%r21, 112(%r26)
	std	%r22, 120(%r26)

	/* Note reverse branch hint for addib is taken.  */
	addib,COND(>),n	-1, %r1, 1b
	ldo	128(%r26), %r26

#else

	/*
	 * This loop is optimized for PCXL/PCXL2 ldw/ldw and stw/stw
	 * bundles (very restricted rules for bundling).
	 * Note that until (if) we start saving
	 * the full 64 bit register values on interrupt, we can't
	 * use ldd/std on a 32 bit kernel.
	 */
	ldw	0(%r25), %r19
	ldi	(PAGE_SIZE / 64), %r1

1:
	ldw	4(%r25), %r20
	ldw	8(%r25), %r21
	ldw	12(%r25), %r22
	stw	%r19, 0(%r26)
	stw	%r20, 4(%r26)
	stw	%r21, 8(%r26)
	stw	%r22, 12(%r26)
	ldw	16(%r25), %r19
	ldw	20(%r25), %r20
	ldw	24(%r25), %r21
	ldw	28(%r25), %r22
	stw	%r19, 16(%r26)
	stw	%r20, 20(%r26)
	stw	%r21, 24(%r26)
	stw	%r22, 28(%r26)
	ldw	32(%r25), %r19
	ldw	36(%r25), %r20
	ldw	40(%r25), %r21
	ldw	44(%r25), %r22
	stw	%r19, 32(%r26)
	stw	%r20, 36(%r26)
	stw	%r21, 40(%r26)
	stw	%r22, 44(%r26)
	ldw	48(%r25), %r19
	ldw	52(%r25), %r20
	ldw	56(%r25), %r21
	ldw	60(%r25), %r22
	stw	%r19, 48(%r26)
	stw	%r20, 52(%r26)
	ldo	64(%r25), %r25
	stw	%r21, 56(%r26)
	stw	%r22, 60(%r26)
	ldo	64(%r26), %r26

	addib,COND(>),n	-1, %r1, 1b
	ldw	0(%r25), %r19
#endif
	bv	%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(copy_page_asm)
/*
 * NOTE: Code in clear_user_page has a hard coded dependency on the
 *	 maximum alias boundary being 4 Mb.  We've been assured by the
 *	 parisc chip designers that there will not ever be a parisc
 *	 chip with a larger alias boundary (Never say never :-) ).
 *
 *	 Subtle: the dtlb miss handlers support the temp alias region by
 *	 "knowing" that if a dtlb miss happens within the temp alias
 *	 region it must have occurred while in clear_user_page.  Since
 *	 this routine makes use of processor local translations, we
 *	 don't want to insert them into the kernel page table.  Instead,
 *	 we load up some general registers (they need to be registers
 *	 which aren't shadowed) with the physical page numbers (preshifted
 *	 for tlb insertion) needed to insert the translations.  When we
 *	 miss on the translation, the dtlb miss handler inserts the
 *	 translation into the tlb using these values:
 *
 *	 %r26 physical page (shifted for tlb insert) of "to" translation
 *	 %r23 physical page (shifted for tlb insert) of "from" translation
 */

	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	#define PAGE_ADD_SHIFT	(PAGE_SHIFT-12)
	.macro	convert_phys_for_tlb_insert20	phys
	extrd,u	\phys, 56-PAGE_ADD_SHIFT, 32-PAGE_ADD_SHIFT, \phys
#if _PAGE_SIZE_ENCODING_DEFAULT
	depdi	_PAGE_SIZE_ENCODING_DEFAULT, 63, (63-58), \phys
#endif
	.endm
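/*
 * (What the macro above produces, as read from the code: the physical
 * page frame number shifted into the position idtlbt/iitlbt expect,
 * with the default page-size encoding deposited into the low bits
 * when the kernel is built with a non-zero encoding.)
 */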
/*
 * copy_user_page_asm() performs a page copy using mappings
 * equivalent to the user page mappings.  It can be used to
 * implement copy_user_page() but unfortunately both the `from'
 * and `to' pages need to be flushed through mappings equivalent
 * to the user mappings after the copy because the kernel accesses
 * the `from' page through the kmap kernel mapping and the `to'
 * page needs to be flushed since code can be copied.  As a
 * result, this implementation is less efficient than the simpler
 * copy using the kernel mapping.  It only needs the `from' page
 * to be flushed via the user mapping.  The kunmap routines handle
 * the flushes needed for the kernel mapping.
 *
 * I'm still keeping this around because it may be possible to
 * use it if more information is passed into copy_user_page().
 * Have to do some measurements to see if it is worthwhile to
 * lobby for such a change.
 *
 */
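/*
 * (Assumed C-level signature, from the %r26/%r25/%r24 usage below:
 * void copy_user_page_asm(void *to, void *from, unsigned long vaddr);)
 */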
ENTRY_CFI(copy_user_page_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	/* Convert virtual `to' and `from' addresses to physical addresses.
	   Move `from' physical address to non-shadowed register.  */
	ldil	L%(__PAGE_OFFSET), %r1
	sub	%r26, %r1, %r26
	sub	%r25, %r1, %r23

	ldil	L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
	depdi	0, 31,32, %r28		/* clear any sign extension */
#endif
	convert_phys_for_tlb_insert20 %r26	/* convert phys addr to tlb insert format */
	convert_phys_for_tlb_insert20 %r23	/* convert phys addr to tlb insert format */
	depd	%r24,63,22, %r28	/* Form aliased virtual address 'to' */
	depdi	0, 63,PAGE_SHIFT, %r28	/* Clear any offset bits */
	copy	%r28, %r29
	depdi	1, 41,1, %r29		/* Form aliased virtual address 'from' */
#else
	extrw,u	%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
	extrw,u	%r23, 24,25, %r23	/* convert phys addr to tlb insert format */
	depw	%r24, 31,22, %r28	/* Form aliased virtual address 'to' */
	depwi	0, 31,PAGE_SHIFT, %r28	/* Clear any offset bits */
	copy	%r28, %r29
	depwi	1, 9,1, %r29		/* Form aliased virtual address 'from' */
#endif

	/* Purge any old translations */

#ifdef CONFIG_PA20
	pdtlb,l	%r0(%r28)
	pdtlb,l	%r0(%r29)
#else
	tlb_lock	%r20,%r21,%r22
	pdtlb	%r0(%r28)
	pdtlb	%r0(%r29)
	tlb_unlock	%r20,%r21,%r22
#endif

#ifdef CONFIG_64BIT
	/* PA8x00 CPUs can consume 2 loads or 1 store per cycle.
	 * Unroll the loop by hand and arrange insn appropriately.
	 * GCC probably can do this just as well.
	 */

	ldd	0(%r29), %r19
	ldi	(PAGE_SIZE / 128), %r1

1:	ldd	8(%r29), %r20
	ldd	16(%r29), %r21
	ldd	24(%r29), %r22
	std	%r19, 0(%r28)
	std	%r20, 8(%r28)

	ldd	32(%r29), %r19
	ldd	40(%r29), %r20
	std	%r21, 16(%r28)
	std	%r22, 24(%r28)

	ldd	48(%r29), %r21
	ldd	56(%r29), %r22
	std	%r19, 32(%r28)
	std	%r20, 40(%r28)

	ldd	64(%r29), %r19
	ldd	72(%r29), %r20
	std	%r21, 48(%r28)
	std	%r22, 56(%r28)

	ldd	80(%r29), %r21
	ldd	88(%r29), %r22
	std	%r19, 64(%r28)
	std	%r20, 72(%r28)

	ldd	96(%r29), %r19
	ldd	104(%r29), %r20
	std	%r21, 80(%r28)
	std	%r22, 88(%r28)

	ldd	112(%r29), %r21
	ldd	120(%r29), %r22
	std	%r19, 96(%r28)
	std	%r20, 104(%r28)

	ldo	128(%r29), %r29
	std	%r21, 112(%r28)
	std	%r22, 120(%r28)
	ldo	128(%r28), %r28

	/* conditional branches nullify on forward taken branch, and on
	 * non-taken backward branch.  Note that .+4 is a backwards branch.
	 * The ldd should only get executed if the branch is taken.
	 */
	addib,COND(>),n	-1, %r1, 1b	/* bundle 10 */
	ldd	0(%r29), %r19		/* start next loads */

#else
	ldi	(PAGE_SIZE / 64), %r1

	/*
	 * This loop is optimized for PCXL/PCXL2 ldw/ldw and stw/stw
	 * bundles (very restricted rules for bundling).  It probably
	 * does OK on PCXU and better, but we could do better with
	 * ldd/std instructions.  Note that until (if) we start saving
	 * the full 64 bit register values on interrupt, we can't
	 * use ldd/std on a 32 bit kernel.
	 */

1:	ldw	0(%r29), %r19
	ldw	4(%r29), %r20
	ldw	8(%r29), %r21
	ldw	12(%r29), %r22
	stw	%r19, 0(%r28)
	stw	%r20, 4(%r28)
	stw	%r21, 8(%r28)
	stw	%r22, 12(%r28)
	ldw	16(%r29), %r19
	ldw	20(%r29), %r20
	ldw	24(%r29), %r21
	ldw	28(%r29), %r22
	stw	%r19, 16(%r28)
	stw	%r20, 20(%r28)
	stw	%r21, 24(%r28)
	stw	%r22, 28(%r28)
	ldw	32(%r29), %r19
	ldw	36(%r29), %r20
	ldw	40(%r29), %r21
	ldw	44(%r29), %r22
	stw	%r19, 32(%r28)
	stw	%r20, 36(%r28)
	stw	%r21, 40(%r28)
	stw	%r22, 44(%r28)
	ldw	48(%r29), %r19
	ldw	52(%r29), %r20
	ldw	56(%r29), %r21
	ldw	60(%r29), %r22
	stw	%r19, 48(%r28)
	stw	%r20, 52(%r28)
	stw	%r21, 56(%r28)
	stw	%r22, 60(%r28)
	ldo	64(%r28), %r28

	addib,COND(>)	-1, %r1, 1b
	ldo	64(%r29), %r29
#endif

	bv	%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(copy_user_page_asm)
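/*
 * (Assumed C-level signature, from the register usage below:
 * void clear_user_page_asm(void *page, unsigned long vaddr);
 * %r25 supplies the user virtual address whose low-order bits pick
 * the congruent temp-alias slot.)
 */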
ENTRY_CFI(clear_user_page_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	tophys_r1	%r26

	ldil	L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
	depdi	0, 31,32, %r28		/* clear any sign extension */
#endif
	convert_phys_for_tlb_insert20 %r26	/* convert phys addr to tlb insert format */
	depd	%r25, 63,22, %r28	/* Form aliased virtual address 'to' */
	depdi	0, 63,PAGE_SHIFT, %r28	/* Clear any offset bits */
#else
	extrw,u	%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
	depw	%r25, 31,22, %r28	/* Form aliased virtual address 'to' */
	depwi	0, 31,PAGE_SHIFT, %r28	/* Clear any offset bits */
#endif

	/* Purge any old translation */

#ifdef CONFIG_PA20
	pdtlb,l	%r0(%r28)
#else
	tlb_lock	%r20,%r21,%r22
	pdtlb	%r0(%r28)
	tlb_unlock	%r20,%r21,%r22
#endif

#ifdef CONFIG_64BIT
	ldi	(PAGE_SIZE / 128), %r1

	/* PREFETCH (Write) has not (yet) been proven to help here */
	/* #define PREFETCHW_OP	ldd 256(%0), %r0 */

1:	std	%r0, 0(%r28)
	std	%r0, 8(%r28)
	std	%r0, 16(%r28)
	std	%r0, 24(%r28)
	std	%r0, 32(%r28)
	std	%r0, 40(%r28)
	std	%r0, 48(%r28)
	std	%r0, 56(%r28)
	std	%r0, 64(%r28)
	std	%r0, 72(%r28)
	std	%r0, 80(%r28)
	std	%r0, 88(%r28)
	std	%r0, 96(%r28)
	std	%r0, 104(%r28)
	std	%r0, 112(%r28)
	std	%r0, 120(%r28)
	addib,COND(>)	-1, %r1, 1b
	ldo	128(%r28), %r28

#else	/* ! CONFIG_64BIT */
	ldi	(PAGE_SIZE / 64), %r1

1:	stw	%r0, 0(%r28)
	stw	%r0, 4(%r28)
	stw	%r0, 8(%r28)
	stw	%r0, 12(%r28)
	stw	%r0, 16(%r28)
	stw	%r0, 20(%r28)
	stw	%r0, 24(%r28)
	stw	%r0, 28(%r28)
	stw	%r0, 32(%r28)
	stw	%r0, 36(%r28)
	stw	%r0, 40(%r28)
	stw	%r0, 44(%r28)
	stw	%r0, 48(%r28)
	stw	%r0, 52(%r28)
	stw	%r0, 56(%r28)
	stw	%r0, 60(%r28)
	addib,COND(>)	-1, %r1, 1b
	ldo	64(%r28), %r28
#endif	/* CONFIG_64BIT */

	bv	%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(clear_user_page_asm)
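/*
 * (Assumed C-level signature, from the register usage below:
 * void flush_dcache_page_asm(unsigned long phys_addr, unsigned long
 * vaddr); the page is mapped at a vaddr-congruent temp alias and
 * flushed one cache line per fdc,m.)
 */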
ENTRY_CFI(flush_dcache_page_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	ldil	L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
	depdi	0, 31,32, %r28		/* clear any sign extension */
#endif
	convert_phys_for_tlb_insert20 %r26	/* convert phys addr to tlb insert format */
	depd	%r25, 63,22, %r28	/* Form aliased virtual address 'to' */
	depdi	0, 63,PAGE_SHIFT, %r28	/* Clear any offset bits */
#else
	extrw,u	%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
	depw	%r25, 31,22, %r28	/* Form aliased virtual address 'to' */
	depwi	0, 31,PAGE_SHIFT, %r28	/* Clear any offset bits */
#endif

	/* Purge any old translation */

#ifdef CONFIG_PA20
	pdtlb,l	%r0(%r28)
#else
	tlb_lock	%r20,%r21,%r22
	pdtlb	%r0(%r28)
	tlb_unlock	%r20,%r21,%r22
#endif

	ldil	L%dcache_stride, %r1
	ldw	R%dcache_stride(%r1), %r31

#ifdef CONFIG_64BIT
	depdi,z	1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z	1, 31-PAGE_SHIFT,1, %r25
#endif
	add	%r28, %r25, %r25
	sub	%r25, %r31, %r25

1:	fdc,m	%r31(%r28)
	fdc,m	%r31(%r28)
	fdc,m	%r31(%r28)
	fdc,m	%r31(%r28)
	fdc,m	%r31(%r28)
	fdc,m	%r31(%r28)
	fdc,m	%r31(%r28)
	fdc,m	%r31(%r28)
	fdc,m	%r31(%r28)
	fdc,m	%r31(%r28)
	fdc,m	%r31(%r28)
	fdc,m	%r31(%r28)
	fdc,m	%r31(%r28)
	fdc,m	%r31(%r28)
	fdc,m	%r31(%r28)
	cmpb,COND(<<)	%r28, %r25, 1b
	fdc,m	%r31(%r28)

	sync
	bv	%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(flush_dcache_page_asm)
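/*
 * (flush_icache_page_asm below is assumed to take the same
 * (phys_addr, vaddr) argument pair; it differs in purging both the
 * D- and I-TLB entries, for the reason given in the comment inside.)
 */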
ENTRY_CFI(flush_icache_page_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	ldil	L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
	depdi	0, 31,32, %r28		/* clear any sign extension */
#endif
	convert_phys_for_tlb_insert20 %r26	/* convert phys addr to tlb insert format */
	depd	%r25, 63,22, %r28	/* Form aliased virtual address 'to' */
	depdi	0, 63,PAGE_SHIFT, %r28	/* Clear any offset bits */
#else
	extrw,u	%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
	depw	%r25, 31,22, %r28	/* Form aliased virtual address 'to' */
	depwi	0, 31,PAGE_SHIFT, %r28	/* Clear any offset bits */
#endif

	/* Purge any old translation.  Note that the FIC instruction
	 * may use either the instruction or data TLB.  Given that we
	 * have a flat address space, it's not clear which TLB will be
	 * used.  So, we purge both entries.  */

#ifdef CONFIG_PA20
	pdtlb,l	%r0(%r28)
	pitlb,l	%r0(%sr4,%r28)
#else
	tlb_lock	%r20,%r21,%r22
	pdtlb	%r0(%r28)
	pitlb	%r0(%sr4,%r28)
	tlb_unlock	%r20,%r21,%r22
#endif

	ldil	L%icache_stride, %r1
	ldw	R%icache_stride(%r1), %r31

#ifdef CONFIG_64BIT
	depdi,z	1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z	1, 31-PAGE_SHIFT,1, %r25
#endif
	add	%r28, %r25, %r25
	sub	%r25, %r31, %r25

	/* fic only has the type 26 form on PA1.1, requiring an
	 * explicit space specification, so use %sr4 */

1:	fic,m	%r31(%sr4,%r28)
	fic,m	%r31(%sr4,%r28)
	fic,m	%r31(%sr4,%r28)
	fic,m	%r31(%sr4,%r28)
	fic,m	%r31(%sr4,%r28)
	fic,m	%r31(%sr4,%r28)
	fic,m	%r31(%sr4,%r28)
	fic,m	%r31(%sr4,%r28)
	fic,m	%r31(%sr4,%r28)
	fic,m	%r31(%sr4,%r28)
	fic,m	%r31(%sr4,%r28)
	fic,m	%r31(%sr4,%r28)
	fic,m	%r31(%sr4,%r28)
	fic,m	%r31(%sr4,%r28)
	fic,m	%r31(%sr4,%r28)
	cmpb,COND(<<)	%r28, %r25, 1b
	fic,m	%r31(%sr4,%r28)

	sync
	bv	%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(flush_icache_page_asm)
ENTRY_CFI(flush_kernel_dcache_page_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	ldil	L%dcache_stride, %r1
	ldw	R%dcache_stride(%r1), %r23

#ifdef CONFIG_64BIT
	depdi,z	1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z	1, 31-PAGE_SHIFT,1, %r25
#endif
	add	%r26, %r25, %r25
	sub	%r25, %r23, %r25

1:	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	cmpb,COND(<<)	%r26, %r25, 1b
	fdc,m	%r23(%r26)

	sync
	bv	%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(flush_kernel_dcache_page_asm)

ENTRY_CFI(purge_kernel_dcache_page_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	ldil	L%dcache_stride, %r1
	ldw	R%dcache_stride(%r1), %r23

#ifdef CONFIG_64BIT
	depdi,z	1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z	1, 31-PAGE_SHIFT,1, %r25
#endif
	add	%r26, %r25, %r25
	sub	%r25, %r23, %r25

1:	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	cmpb,COND(<<)	%r26, %r25, 1b
	pdc,m	%r23(%r26)

	sync
	bv	%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(purge_kernel_dcache_page_asm)
ENTRY_CFI(flush_user_dcache_range_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	ldil	L%dcache_stride, %r1
	ldw	R%dcache_stride(%r1), %r23
	ldo	-1(%r23), %r21
	ANDCM	%r26, %r21, %r26

1:	cmpb,COND(<<),n	%r26, %r25, 1b
	fdc,m	%r23(%sr3, %r26)

	sync
	bv	%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(flush_user_dcache_range_asm)
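/*
 * (Note on the range routines above and below: the ldo -1/ANDCM pair
 * rounds the start address down to a cache-line boundary, assuming
 * the stride is a power of two, after which the fdc/fic/pdc loop
 * steps one line per iteration until it passes the end address in
 * %r25.)
 */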
ENTRY_CFI(flush_kernel_dcache_range_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	ldil	L%dcache_stride, %r1
	ldw	R%dcache_stride(%r1), %r23
	ldo	-1(%r23), %r21
	ANDCM	%r26, %r21, %r26

1:	cmpb,COND(<<),n	%r26, %r25, 1b
	fdc,m	%r23(%r26)

	sync
	syncdma
	bv	%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(flush_kernel_dcache_range_asm)

ENTRY_CFI(purge_kernel_dcache_range_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	ldil	L%dcache_stride, %r1
	ldw	R%dcache_stride(%r1), %r23
	ldo	-1(%r23), %r21
	ANDCM	%r26, %r21, %r26

1:	cmpb,COND(<<),n	%r26, %r25, 1b
	pdc,m	%r23(%r26)

	sync
	syncdma
	bv	%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(purge_kernel_dcache_range_asm)

ENTRY_CFI(flush_user_icache_range_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	ldil	L%icache_stride, %r1
	ldw	R%icache_stride(%r1), %r23
	ldo	-1(%r23), %r21
	ANDCM	%r26, %r21, %r26

1:	cmpb,COND(<<),n	%r26, %r25, 1b
	fic,m	%r23(%sr3, %r26)

	sync
	bv	%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(flush_user_icache_range_asm)

ENTRY_CFI(flush_kernel_icache_page)
	.proc
	.callinfo NO_CALLS
	.entry

	ldil	L%icache_stride, %r1
	ldw	R%icache_stride(%r1), %r23

#ifdef CONFIG_64BIT
	depdi,z	1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z	1, 31-PAGE_SHIFT,1, %r25
#endif
	add	%r26, %r25, %r25
	sub	%r25, %r23, %r25

1:	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	cmpb,COND(<<)	%r26, %r25, 1b
	fic,m	%r23(%sr4, %r26)

	sync
	bv	%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(flush_kernel_icache_page)

ENTRY_CFI(flush_kernel_icache_range_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	ldil	L%icache_stride, %r1
	ldw	R%icache_stride(%r1), %r23
	ldo	-1(%r23), %r21
	ANDCM	%r26, %r21, %r26

1:	cmpb,COND(<<),n	%r26, %r25, 1b
	fic,m	%r23(%sr4, %r26)

	sync
	bv	%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(flush_kernel_icache_range_asm)
	__INIT

	/* align should cover use of rfi in disable_sr_hashing_asm and
	 * srdis_done.
	 */
	.align	256
ENTRY_CFI(disable_sr_hashing_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	/*
	 * Switch to real mode
	 */
	/* pcxt_ssm_bug */
	rsm	PSW_SM_I, %r0
	load32	PA(1f), %r1
	nop
	nop
	nop
	nop
	nop

	rsm	PSW_SM_Q, %r0		/* prep to load iia queue */
	mtctl	%r0, %cr17		/* Clear IIASQ tail */
	mtctl	%r0, %cr17		/* Clear IIASQ head */
	mtctl	%r1, %cr18		/* IIAOQ head */
	ldo	4(%r1), %r1
	mtctl	%r1, %cr18		/* IIAOQ tail */
	load32	REAL_MODE_PSW, %r1
	mtctl	%r1, %ipsw
	rfi
	nop

1:	cmpib,=,n	SRHASH_PCXST, %r26, srdis_pcxs
	cmpib,=,n	SRHASH_PCXL, %r26, srdis_pcxl
	cmpib,=,n	SRHASH_PA20, %r26, srdis_pa20
	b,n	srdis_done
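/*
 * (The .word constants below hand-encode model-specific mfdiag and
 * mtdiag diagnose instructions the assembler has no mnemonics for;
 * the comment beside each word gives the intended disassembly.)
 */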
srdis_pcxs:

	/* Disable Space Register Hashing for PCXS,PCXT,PCXT' */

	.word	0x141c1a00		/* mfdiag %dr0, %r28 */
	.word	0x141c1a00		/* must issue twice */
	depwi	0,18,1, %r28		/* Clear DHE (dcache hash enable) */
	depwi	0,20,1, %r28		/* Clear IHE (icache hash enable) */
	.word	0x141c1600		/* mtdiag %r28, %dr0 */
	.word	0x141c1600		/* must issue twice */
	b,n	srdis_done

srdis_pcxl:

	/* Disable Space Register Hashing for PCXL */

	.word	0x141c0600		/* mfdiag %dr0, %r28 */
	depwi	0,28,2, %r28		/* Clear DHASH_EN & IHASH_EN */
	.word	0x141c0240		/* mtdiag %r28, %dr0 */
	b,n	srdis_done

srdis_pa20:

	/* Disable Space Register Hashing for PCXU,PCXU+,PCXW,PCXW+,PCXW2 */

	.word	0x144008bc		/* mfdiag %dr2, %r28 */
	depdi	0, 54,1, %r28		/* clear DIAG_SPHASH_ENAB (bit 54) */
	.word	0x145c1840		/* mtdiag %r28, %dr2 */

srdis_done:
	/* Switch back to virtual mode */
	rsm	PSW_SM_I, %r0		/* prep to load iia queue */
	load32	2f, %r1
	nop
	nop
	nop
	nop
	nop

	rsm	PSW_SM_Q, %r0		/* prep to load iia queue */
	mtctl	%r0, %cr17		/* Clear IIASQ tail */
	mtctl	%r0, %cr17		/* Clear IIASQ head */
	mtctl	%r1, %cr18		/* IIAOQ head */
	ldo	4(%r1), %r1
	mtctl	%r1, %cr18		/* IIAOQ tail */
	load32	KERNEL_PSW, %r1
	mtctl	%r1, %ipsw
	rfi
	nop

2:	bv	%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(disable_sr_hashing_asm)

	.end