page.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003, 04, 05 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2007  Maciej W. Rozycki
 * Copyright (C) 2008  Thiemo Seufer
 * Copyright (C) 2012  MIPS Technologies, Inc.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/proc_fs.h>

#include <asm/bugs.h>
#include <asm/cacheops.h>
#include <asm/cpu-type.h>
#include <asm/inst.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/prefetch.h>
#include <asm/bootinfo.h>
#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
#include <asm/cpu.h>
#include <asm/war.h>

#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
#include <asm/sibyte/sb1250.h>
#include <asm/sibyte/sb1250_regs.h>
#include <asm/sibyte/sb1250_dma.h>
#endif

#include <asm/uasm.h>

/* Registers used in the assembled routines. */
#define ZERO 0
#define AT 2
#define A0 4
#define A1 5
#define A2 6
#define T0 8
#define T1 9
#define T2 10
#define T3 11
#define T9 25
#define RA 31
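
/*
 * Editor's note: these are raw register numbers, not ABI mnemonics.
 * In the o32 convention $4-$6 are a0-a2, $8-$11 are t0-t3, $25 is t9
 * and $31 is ra.  Note that AT here is defined as register $2 (v0 in
 * the o32 ABI), not the assembler temporary $1 -- presumably chosen to
 * stay clear of the assembler's own use of $at.
 */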

/* Handle labels (which must be positive integers). */
enum label_id {
        label_clear_nopref = 1,
        label_clear_pref,
        label_copy_nopref,
        label_copy_pref_both,
        label_copy_pref_store,
};

UASM_L_LA(_clear_nopref)
UASM_L_LA(_clear_pref)
UASM_L_LA(_copy_nopref)
UASM_L_LA(_copy_pref_both)
UASM_L_LA(_copy_pref_store)

/* We need one branch and therefore one relocation per target label. */
static struct uasm_label labels[5];
static struct uasm_reloc relocs[5];

#define cpu_is_r4600_v1_x()     ((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()     ((read_c0_prid() & 0xfffffff0) == 0x00002020)

/*
 * MIPS R6 reduced the pref instruction's offset field to a signed
 * 9-bit immediate.  Emit the prefetch only if the offset fits;
 * otherwise silently skip it.
 */
#define _uasm_i_pref(a, b, c, d)                \
do {                                            \
        if (cpu_has_mips_r6) {                  \
                if (c <= 0xff && c >= -0x100)   \
                        uasm_i_pref(a, b, c, d);\
        } else {                                \
                uasm_i_pref(a, b, c, d);        \
        }                                       \
} while (0)
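
/*
 * Editor's note: the range check above corresponds to the signed 9-bit
 * offsets -256 (-0x100) through 255 (0xff).  So, for example, a call
 * with a combined bias + offset of 256 becomes a no-op on an R6 core,
 * while 255 or -256 would still be emitted.
 */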

static int pref_bias_clear_store;
static int pref_bias_copy_load;
static int pref_bias_copy_store;

static u32 pref_src_mode;
static u32 pref_dst_mode;

static int clear_word_size;
static int copy_word_size;

static int half_clear_loop_size;
static int half_copy_loop_size;

static int cache_line_size;
#define cache_line_mask() (cache_line_size - 1)

static inline void
pg_addiu(u32 **buf, unsigned int reg1, unsigned int reg2, unsigned int off)
{
        if (cpu_has_64bit_gp_regs && DADDI_WAR && r4k_daddiu_bug()) {
                if (off > 0x7fff) {
                        uasm_i_lui(buf, T9, uasm_rel_hi(off));
                        uasm_i_addiu(buf, T9, T9, uasm_rel_lo(off));
                } else
                        uasm_i_addiu(buf, T9, ZERO, off);
                uasm_i_daddu(buf, reg1, reg2, T9);
        } else {
                if (off > 0x7fff) {
                        uasm_i_lui(buf, T9, uasm_rel_hi(off));
                        uasm_i_addiu(buf, T9, T9, uasm_rel_lo(off));
                        UASM_i_ADDU(buf, reg1, reg2, T9);
                } else
                        UASM_i_ADDIU(buf, reg1, reg2, off);
        }
}
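
/*
 * Editor's note, a sketch of what pg_addiu emits (illustrative, not
 * captured output): for an offset like 0x9000 that does not fit in a
 * 16-bit signed immediate, the non-workaround path expands to
 *
 *      lui   t9, %hi(0x9000)
 *      addiu t9, t9, %lo(0x9000)
 *      addu  reg1, reg2, t9            # daddu on 64-bit
 *
 * while a small offset collapses to a single addiu reg1, reg2, off.
 * The DADDI_WAR path builds the constant in t9 even for small offsets
 * so the final add is always a register-register daddu.
 */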

static void set_prefetch_parameters(void)
{
        if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg)
                clear_word_size = 8;
        else
                clear_word_size = 4;

        if (cpu_has_64bit_gp_regs)
                copy_word_size = 8;
        else
                copy_word_size = 4;

        /*
         * The prefs used here are using "streaming" hints, which cause the
         * copied data to be kicked out of the cache sooner.  A page copy often
         * ends up copying a lot more data than is commonly used, so this seems
         * to make sense in terms of reducing cache pollution, but I've no real
         * performance data to back this up.
         */
        if (cpu_has_prefetch) {
                /*
                 * XXX: Most prefetch bias values in here are based on
                 * guesswork.
                 */
                cache_line_size = cpu_dcache_line_size();
                switch (current_cpu_type()) {
                case CPU_R5500:
                case CPU_TX49XX:
                        /* These processors only support the Pref_Load hint. */
                        pref_bias_copy_load = 256;
                        break;

                case CPU_R10000:
                case CPU_R12000:
                case CPU_R14000:
                case CPU_R16000:
                        /*
                         * Those values have been experimentally tuned for an
                         * Origin 200.
                         */
                        pref_bias_clear_store = 512;
                        pref_bias_copy_load = 256;
                        pref_bias_copy_store = 256;
                        pref_src_mode = Pref_LoadStreamed;
                        pref_dst_mode = Pref_StoreStreamed;
                        break;

                case CPU_SB1:
                case CPU_SB1A:
                        pref_bias_clear_store = 128;
                        pref_bias_copy_load = 128;
                        pref_bias_copy_store = 128;
                        /*
                         * SB1 pass1 Pref_LoadStreamed/Pref_StoreStreamed
                         * hints are broken.
                         */
                        if (current_cpu_type() == CPU_SB1 &&
                            (current_cpu_data.processor_id & 0xff) < 0x02) {
                                pref_src_mode = Pref_Load;
                                pref_dst_mode = Pref_Store;
                        } else {
                                pref_src_mode = Pref_LoadStreamed;
                                pref_dst_mode = Pref_StoreStreamed;
                        }
                        break;

                case CPU_LOONGSON3:
                        /* Loongson-3 only supports Pref_Load/Pref_Store. */
                        pref_bias_clear_store = 128;
                        pref_bias_copy_load = 128;
                        pref_bias_copy_store = 128;
                        pref_src_mode = Pref_Load;
                        pref_dst_mode = Pref_Store;
                        break;

                default:
                        pref_bias_clear_store = 128;
                        pref_bias_copy_load = 256;
                        pref_bias_copy_store = 128;
                        pref_src_mode = Pref_LoadStreamed;
                        if (cpu_has_mips_r6)
                                /*
                                 * Bit 30 (Pref_PrepareForStore) has been
                                 * removed from MIPS R6.  Use bit 5
                                 * (Pref_StoreStreamed).
                                 */
                                pref_dst_mode = Pref_StoreStreamed;
                        else
                                pref_dst_mode = Pref_PrepareForStore;
                        break;
                }
        } else {
                if (cpu_has_cache_cdex_s)
                        cache_line_size = cpu_scache_line_size();
                else if (cpu_has_cache_cdex_p)
                        cache_line_size = cpu_dcache_line_size();
        }

        /*
         * Too much unrolling will overflow the available space in
         * clear_space_array / copy_page_array.
         */
        half_clear_loop_size = min(16 * clear_word_size,
                                   max(cache_line_size >> 1,
                                       4 * clear_word_size));
        half_copy_loop_size = min(16 * copy_word_size,
                                  max(cache_line_size >> 1,
                                      4 * copy_word_size));
}
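
/*
 * Editor's note, a worked example of the clamping above (illustrative
 * values): with 8-byte clear words and a 32-byte D-cache line,
 * half_clear_loop_size becomes min(16 * 8, max(32 >> 1, 4 * 8)) =
 * min(128, 32) = 32 bytes, so each unrolled loop half covers exactly
 * one cache line.
 */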

static void build_clear_store(u32 **buf, int off)
{
        if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg) {
                uasm_i_sd(buf, ZERO, off, A0);
        } else {
                uasm_i_sw(buf, ZERO, off, A0);
        }
}

static inline void build_clear_pref(u32 **buf, int off)
{
        if (off & cache_line_mask())
                return;

        if (pref_bias_clear_store) {
                _uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off,
                             A0);
        } else if (cache_line_size == (half_clear_loop_size << 1)) {
                if (cpu_has_cache_cdex_s) {
                        uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
                } else if (cpu_has_cache_cdex_p) {
                        if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
                                uasm_i_nop(buf);
                                uasm_i_nop(buf);
                                uasm_i_nop(buf);
                                uasm_i_nop(buf);
                        }

                        if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
                                uasm_i_lw(buf, ZERO, ZERO, AT);

                        uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
                }
        }
}
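
/*
 * Editor's note on the fallback above (my reading of the code, not from
 * the original comments): prefetches and cache ops are only emitted at
 * cache-line-aligned offsets, so at most one per line.  When no prefetch
 * bias is configured, the Create_Dirty_Exclusive cache op is used
 * instead, but only when one full loop iteration
 * (half_clear_loop_size << 1) writes exactly one cache line, so every
 * line the loop touches is made dirty-exclusive before being stored to.
 */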

extern u32 __clear_page_start;
extern u32 __clear_page_end;
extern u32 __copy_page_start;
extern u32 __copy_page_end;

void build_clear_page(void)
{
        int off;
        u32 *buf = &__clear_page_start;
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;
        int i;
        static atomic_t run_once = ATOMIC_INIT(0);

        if (atomic_xchg(&run_once, 1)) {
                return;
        }

        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));

        set_prefetch_parameters();

        /*
         * This algorithm makes the following assumptions:
         *   - The prefetch bias is a multiple of 2 words.
         *   - The prefetch bias is less than one page.
         */
        BUG_ON(pref_bias_clear_store % (2 * clear_word_size));
        BUG_ON(PAGE_SIZE < pref_bias_clear_store);

        off = PAGE_SIZE - pref_bias_clear_store;
        if (off > 0xffff || !pref_bias_clear_store)
                pg_addiu(&buf, A2, A0, off);
        else
                uasm_i_ori(&buf, A2, A0, off);

        if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
                uasm_i_lui(&buf, AT, uasm_rel_hi(0xa0000000));

        off = cache_line_size ? min(8, pref_bias_clear_store / cache_line_size)
                                * cache_line_size : 0;
        while (off) {
                build_clear_pref(&buf, -off);
                off -= cache_line_size;
        }
        uasm_l_clear_pref(&l, buf);
        do {
                build_clear_pref(&buf, off);
                build_clear_store(&buf, off);
                off += clear_word_size;
        } while (off < half_clear_loop_size);
        pg_addiu(&buf, A0, A0, 2 * off);
        off = -off;
        do {
                build_clear_pref(&buf, off);
                if (off == -clear_word_size)
                        uasm_il_bne(&buf, &r, A0, A2, label_clear_pref);
                build_clear_store(&buf, off);
                off += clear_word_size;
        } while (off < 0);
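
        /*
         * Editor's note: the bne back to label_clear_pref is emitted
         * just before the final store, so that store lands in the
         * branch delay slot and the loop loses no cycle to the branch.
         */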

        if (pref_bias_clear_store) {
                pg_addiu(&buf, A2, A0, pref_bias_clear_store);
                uasm_l_clear_nopref(&l, buf);
                off = 0;
                do {
                        build_clear_store(&buf, off);
                        off += clear_word_size;
                } while (off < half_clear_loop_size);
                pg_addiu(&buf, A0, A0, 2 * off);
                off = -off;
                do {
                        if (off == -clear_word_size)
                                uasm_il_bne(&buf, &r, A0, A2,
                                            label_clear_nopref);
                        build_clear_store(&buf, off);
                        off += clear_word_size;
                } while (off < 0);
        }

        uasm_i_jr(&buf, RA);
        uasm_i_nop(&buf);

        BUG_ON(buf > &__clear_page_end);

        uasm_resolve_relocs(relocs, labels);

        pr_debug("Synthesized clear page handler (%u instructions).\n",
                 (u32)(buf - &__clear_page_start));

        pr_debug("\t.set push\n");
        pr_debug("\t.set noreorder\n");
        for (i = 0; i < (buf - &__clear_page_start); i++)
                pr_debug("\t.word 0x%08x\n", (&__clear_page_start)[i]);
        pr_debug("\t.set pop\n");
}

static void build_copy_load(u32 **buf, int reg, int off)
{
        if (cpu_has_64bit_gp_regs) {
                uasm_i_ld(buf, reg, off, A1);
        } else {
                uasm_i_lw(buf, reg, off, A1);
        }
}

static void build_copy_store(u32 **buf, int reg, int off)
{
        if (cpu_has_64bit_gp_regs) {
                uasm_i_sd(buf, reg, off, A0);
        } else {
                uasm_i_sw(buf, reg, off, A0);
        }
}

static inline void build_copy_load_pref(u32 **buf, int off)
{
        if (off & cache_line_mask())
                return;

        if (pref_bias_copy_load)
                _uasm_i_pref(buf, pref_src_mode, pref_bias_copy_load + off, A1);
}

static inline void build_copy_store_pref(u32 **buf, int off)
{
        if (off & cache_line_mask())
                return;

        if (pref_bias_copy_store) {
                _uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off,
                             A0);
        } else if (cache_line_size == (half_copy_loop_size << 1)) {
                if (cpu_has_cache_cdex_s) {
                        uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
                } else if (cpu_has_cache_cdex_p) {
                        if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
                                uasm_i_nop(buf);
                                uasm_i_nop(buf);
                                uasm_i_nop(buf);
                                uasm_i_nop(buf);
                        }

                        if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
                                uasm_i_lw(buf, ZERO, ZERO, AT);

                        uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
                }
        }
}
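
/*
 * Editor's overview of the generated copy handler (a summary of the
 * code below, not from the original comments): a main loop that
 * prefetches both the source (A1) and destination (A0) streams; then,
 * if the load bias exceeds the store bias, a loop that prefetches only
 * stores; and finally, if a store bias is set at all, an unprefetched
 * tail covering the last pref_bias_copy_store bytes of the page.
 */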

void build_copy_page(void)
{
        int off;
        u32 *buf = &__copy_page_start;
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;
        int i;
        static atomic_t run_once = ATOMIC_INIT(0);

        if (atomic_xchg(&run_once, 1)) {
                return;
        }

        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));

        set_prefetch_parameters();

        /*
         * This algorithm makes the following assumptions:
         *   - All prefetch biases are multiples of 8 words.
         *   - The prefetch biases are less than one page.
         *   - The store prefetch bias isn't greater than the load
         *     prefetch bias.
         */
        BUG_ON(pref_bias_copy_load % (8 * copy_word_size));
        BUG_ON(pref_bias_copy_store % (8 * copy_word_size));
        BUG_ON(PAGE_SIZE < pref_bias_copy_load);
        BUG_ON(pref_bias_copy_store > pref_bias_copy_load);

        off = PAGE_SIZE - pref_bias_copy_load;
        if (off > 0xffff || !pref_bias_copy_load)
                pg_addiu(&buf, A2, A0, off);
        else
                uasm_i_ori(&buf, A2, A0, off);

        if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
                uasm_i_lui(&buf, AT, uasm_rel_hi(0xa0000000));

        off = cache_line_size ? min(8, pref_bias_copy_load / cache_line_size) *
                                cache_line_size : 0;
        while (off) {
                build_copy_load_pref(&buf, -off);
                off -= cache_line_size;
        }
        off = cache_line_size ? min(8, pref_bias_copy_store / cache_line_size) *
                                cache_line_size : 0;
        while (off) {
                build_copy_store_pref(&buf, -off);
                off -= cache_line_size;
        }
        uasm_l_copy_pref_both(&l, buf);
        do {
                build_copy_load_pref(&buf, off);
                build_copy_load(&buf, T0, off);
                build_copy_load_pref(&buf, off + copy_word_size);
                build_copy_load(&buf, T1, off + copy_word_size);
                build_copy_load_pref(&buf, off + 2 * copy_word_size);
                build_copy_load(&buf, T2, off + 2 * copy_word_size);
                build_copy_load_pref(&buf, off + 3 * copy_word_size);
                build_copy_load(&buf, T3, off + 3 * copy_word_size);
                build_copy_store_pref(&buf, off);
                build_copy_store(&buf, T0, off);
                build_copy_store_pref(&buf, off + copy_word_size);
                build_copy_store(&buf, T1, off + copy_word_size);
                build_copy_store_pref(&buf, off + 2 * copy_word_size);
                build_copy_store(&buf, T2, off + 2 * copy_word_size);
                build_copy_store_pref(&buf, off + 3 * copy_word_size);
                build_copy_store(&buf, T3, off + 3 * copy_word_size);
                off += 4 * copy_word_size;
        } while (off < half_copy_loop_size);
        pg_addiu(&buf, A1, A1, 2 * off);
        pg_addiu(&buf, A0, A0, 2 * off);
        off = -off;
        do {
                build_copy_load_pref(&buf, off);
                build_copy_load(&buf, T0, off);
                build_copy_load_pref(&buf, off + copy_word_size);
                build_copy_load(&buf, T1, off + copy_word_size);
                build_copy_load_pref(&buf, off + 2 * copy_word_size);
                build_copy_load(&buf, T2, off + 2 * copy_word_size);
                build_copy_load_pref(&buf, off + 3 * copy_word_size);
                build_copy_load(&buf, T3, off + 3 * copy_word_size);
                build_copy_store_pref(&buf, off);
                build_copy_store(&buf, T0, off);
                build_copy_store_pref(&buf, off + copy_word_size);
                build_copy_store(&buf, T1, off + copy_word_size);
                build_copy_store_pref(&buf, off + 2 * copy_word_size);
                build_copy_store(&buf, T2, off + 2 * copy_word_size);
                build_copy_store_pref(&buf, off + 3 * copy_word_size);
                if (off == -(4 * copy_word_size))
                        uasm_il_bne(&buf, &r, A2, A0, label_copy_pref_both);
                build_copy_store(&buf, T3, off + 3 * copy_word_size);
                off += 4 * copy_word_size;
        } while (off < 0);

        if (pref_bias_copy_load - pref_bias_copy_store) {
                pg_addiu(&buf, A2, A0,
                         pref_bias_copy_load - pref_bias_copy_store);
                uasm_l_copy_pref_store(&l, buf);
                off = 0;
                do {
                        build_copy_load(&buf, T0, off);
                        build_copy_load(&buf, T1, off + copy_word_size);
                        build_copy_load(&buf, T2, off + 2 * copy_word_size);
                        build_copy_load(&buf, T3, off + 3 * copy_word_size);
                        build_copy_store_pref(&buf, off);
                        build_copy_store(&buf, T0, off);
                        build_copy_store_pref(&buf, off + copy_word_size);
                        build_copy_store(&buf, T1, off + copy_word_size);
                        build_copy_store_pref(&buf, off + 2 * copy_word_size);
                        build_copy_store(&buf, T2, off + 2 * copy_word_size);
                        build_copy_store_pref(&buf, off + 3 * copy_word_size);
                        build_copy_store(&buf, T3, off + 3 * copy_word_size);
                        off += 4 * copy_word_size;
                } while (off < half_copy_loop_size);
                pg_addiu(&buf, A1, A1, 2 * off);
                pg_addiu(&buf, A0, A0, 2 * off);
                off = -off;
                do {
                        build_copy_load(&buf, T0, off);
                        build_copy_load(&buf, T1, off + copy_word_size);
                        build_copy_load(&buf, T2, off + 2 * copy_word_size);
                        build_copy_load(&buf, T3, off + 3 * copy_word_size);
                        build_copy_store_pref(&buf, off);
                        build_copy_store(&buf, T0, off);
                        build_copy_store_pref(&buf, off + copy_word_size);
                        build_copy_store(&buf, T1, off + copy_word_size);
                        build_copy_store_pref(&buf, off + 2 * copy_word_size);
                        build_copy_store(&buf, T2, off + 2 * copy_word_size);
                        build_copy_store_pref(&buf, off + 3 * copy_word_size);
                        if (off == -(4 * copy_word_size))
                                uasm_il_bne(&buf, &r, A2, A0,
                                            label_copy_pref_store);
                        build_copy_store(&buf, T3, off + 3 * copy_word_size);
                        off += 4 * copy_word_size;
                } while (off < 0);
        }

        if (pref_bias_copy_store) {
                pg_addiu(&buf, A2, A0, pref_bias_copy_store);
                uasm_l_copy_nopref(&l, buf);
                off = 0;
                do {
                        build_copy_load(&buf, T0, off);
                        build_copy_load(&buf, T1, off + copy_word_size);
                        build_copy_load(&buf, T2, off + 2 * copy_word_size);
                        build_copy_load(&buf, T3, off + 3 * copy_word_size);
                        build_copy_store(&buf, T0, off);
                        build_copy_store(&buf, T1, off + copy_word_size);
                        build_copy_store(&buf, T2, off + 2 * copy_word_size);
                        build_copy_store(&buf, T3, off + 3 * copy_word_size);
                        off += 4 * copy_word_size;
                } while (off < half_copy_loop_size);
                pg_addiu(&buf, A1, A1, 2 * off);
                pg_addiu(&buf, A0, A0, 2 * off);
                off = -off;
                do {
                        build_copy_load(&buf, T0, off);
                        build_copy_load(&buf, T1, off + copy_word_size);
                        build_copy_load(&buf, T2, off + 2 * copy_word_size);
                        build_copy_load(&buf, T3, off + 3 * copy_word_size);
                        build_copy_store(&buf, T0, off);
                        build_copy_store(&buf, T1, off + copy_word_size);
                        build_copy_store(&buf, T2, off + 2 * copy_word_size);
                        if (off == -(4 * copy_word_size))
                                uasm_il_bne(&buf, &r, A2, A0,
                                            label_copy_nopref);
                        build_copy_store(&buf, T3, off + 3 * copy_word_size);
                        off += 4 * copy_word_size;
                } while (off < 0);
        }

        uasm_i_jr(&buf, RA);
        uasm_i_nop(&buf);

        BUG_ON(buf > &__copy_page_end);

        uasm_resolve_relocs(relocs, labels);

        pr_debug("Synthesized copy page handler (%u instructions).\n",
                 (u32)(buf - &__copy_page_start));

        pr_debug("\t.set push\n");
        pr_debug("\t.set noreorder\n");
        for (i = 0; i < (buf - &__copy_page_start); i++)
                pr_debug("\t.word 0x%08x\n", (&__copy_page_start)[i]);
        pr_debug("\t.set pop\n");
}

#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
extern void clear_page_cpu(void *page);
extern void copy_page_cpu(void *to, void *from);

/*
 * Pad descriptors to cacheline, since each is exclusively owned by a
 * particular CPU.
 */
struct dmadscr {
        u64 dscr_a;
        u64 dscr_b;
        u64 pad_a;
        u64 pad_b;
} ____cacheline_aligned_in_smp page_descr[DM_NUM_CHANNELS];
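
/*
 * Editor's sizing note (mine, not from the original comments): the two
 * pad words bring each descriptor to 32 bytes, and
 * ____cacheline_aligned_in_smp aligns the array, so on a CPU with
 * 32-byte L1 lines each per-CPU descriptor occupies its own cache line
 * and never false-shares with a neighbour.
 */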

void sb1_dma_init(void)
{
        int i;

        for (i = 0; i < DM_NUM_CHANNELS; i++) {
                const u64 base_val = CPHYSADDR((unsigned long)&page_descr[i]) |
                                     V_DM_DSCR_BASE_RINGSZ(1);
                void *base_reg = IOADDR(A_DM_REGISTER(i, R_DM_DSCR_BASE));

                __raw_writeq(base_val, base_reg);
                __raw_writeq(base_val | M_DM_DSCR_BASE_RESET, base_reg);
                __raw_writeq(base_val | M_DM_DSCR_BASE_ENABL, base_reg);
        }
}

void clear_page(void *page)
{
        u64 to_phys = CPHYSADDR((unsigned long)page);
        unsigned int cpu = smp_processor_id();

        /* If the page isn't in KSEG0, fall back to the CPU routine. */
        if ((long)KSEGX((unsigned long)page) != (long)CKSEG0)
                return clear_page_cpu(page);

        page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_ZERO_MEM |
                                 M_DM_DSCRA_L2C_DEST | M_DM_DSCRA_INTERRUPT;
        page_descr[cpu].dscr_b = V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
        __raw_writeq(1, IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)));

        /*
         * Don't really want to do it this way, but there's no
         * reliable way to delay completion detection.
         */
        while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)))
                 & M_DM_DSCR_BASE_INTERRUPT))
                ;
        __raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
}

void copy_page(void *to, void *from)
{
        u64 from_phys = CPHYSADDR((unsigned long)from);
        u64 to_phys = CPHYSADDR((unsigned long)to);
        unsigned int cpu = smp_processor_id();

        /* If either page isn't in KSEG0, fall back to the CPU routine. */
        if ((long)KSEGX((unsigned long)to) != (long)CKSEG0
            || (long)KSEGX((unsigned long)from) != (long)CKSEG0)
                return copy_page_cpu(to, from);

        page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_L2C_DEST |
                                 M_DM_DSCRA_INTERRUPT;
        page_descr[cpu].dscr_b = from_phys | V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
        __raw_writeq(1, IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)));

        /*
         * Don't really want to do it this way, but there's no
         * reliable way to delay completion detection.
         */
        while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)))
                 & M_DM_DSCR_BASE_INTERRUPT))
                ;
        __raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
}

#endif /* CONFIG_SIBYTE_DMA_PAGEOPS */