single_step.c 22 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785
  1. /*
  2. * Copyright 2010 Tilera Corporation. All Rights Reserved.
  3. *
  4. * This program is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU General Public License
  6. * as published by the Free Software Foundation, version 2.
  7. *
  8. * This program is distributed in the hope that it will be useful, but
  9. * WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
  11. * NON INFRINGEMENT. See the GNU General Public License for
  12. * more details.
  13. *
  14. * A code-rewriter that enables instruction single-stepping.
  15. */
  16. #include <linux/smp.h>
  17. #include <linux/ptrace.h>
  18. #include <linux/slab.h>
  19. #include <linux/thread_info.h>
  20. #include <linux/uaccess.h>
  21. #include <linux/mman.h>
  22. #include <linux/types.h>
  23. #include <linux/err.h>
  24. #include <linux/prctl.h>
  25. #include <linux/context_tracking.h>
  26. #include <asm/cacheflush.h>
  27. #include <asm/traps.h>
  28. #include <asm/uaccess.h>
  29. #include <asm/unaligned.h>
  30. #include <arch/abi.h>
  31. #include <arch/spr_def.h>
  32. #include <arch/opcode.h>
#ifndef __tilegx__ /* Hardware support for single step unavailable. */

/* Sign-extend the 17-bit X1 branch-offset field to a signed value. */
#define signExtend17(val) sign_extend((val), 17)

/*
 * Mask of the bundle bits holding the X1 pipe's instruction
 * (the bits cleared/rewritten by move_X1()/addi_X1() below).
 */
#define TILE_X1_MASK (0xffffffffULL << 31)

/* Kind of memory operation encoded in the bundle being stepped, if any. */
enum mem_op {
	MEMOP_NONE,
	MEMOP_LOAD,
	MEMOP_STORE,
	MEMOP_LOAD_POSTINCR,	/* load that also post-increments the address reg */
	MEMOP_STORE_POSTINCR	/* store that also post-increments the address reg */
};
  43. static inline tilepro_bundle_bits set_BrOff_X1(tilepro_bundle_bits n,
  44. s32 offset)
  45. {
  46. tilepro_bundle_bits result;
  47. /* mask out the old offset */
  48. tilepro_bundle_bits mask = create_BrOff_X1(-1);
  49. result = n & (~mask);
  50. /* or in the new offset */
  51. result |= create_BrOff_X1(offset);
  52. return result;
  53. }
  54. static inline tilepro_bundle_bits move_X1(tilepro_bundle_bits n, int dest,
  55. int src)
  56. {
  57. tilepro_bundle_bits result;
  58. tilepro_bundle_bits op;
  59. result = n & (~TILE_X1_MASK);
  60. op = create_Opcode_X1(SPECIAL_0_OPCODE_X1) |
  61. create_RRROpcodeExtension_X1(OR_SPECIAL_0_OPCODE_X1) |
  62. create_Dest_X1(dest) |
  63. create_SrcB_X1(TREG_ZERO) |
  64. create_SrcA_X1(src) ;
  65. result |= op;
  66. return result;
  67. }
/* Return bundle @n with its X1 slot rewritten to a no-op (move zero, zero). */
static inline tilepro_bundle_bits nop_X1(tilepro_bundle_bits n)
{
	return move_X1(n, TREG_ZERO, TREG_ZERO);
}
  72. static inline tilepro_bundle_bits addi_X1(
  73. tilepro_bundle_bits n, int dest, int src, int imm)
  74. {
  75. n &= ~TILE_X1_MASK;
  76. n |= (create_SrcA_X1(src) |
  77. create_Dest_X1(dest) |
  78. create_Imm8_X1(imm) |
  79. create_S_X1(0) |
  80. create_Opcode_X1(IMM_0_OPCODE_X1) |
  81. create_ImmOpcodeExtension_X1(ADDI_IMM_0_OPCODE_X1));
  82. return n;
  83. }
/*
 * Fix up an unaligned load or store discovered while single-stepping.
 *
 * Policy comes from the global "unaligned_fixup" sysctl, overridden by
 * the task's prctl(PR_SET_UNALIGN) setting.  Depending on that policy
 * we either emulate the access with copy_{from,to}_user() (recording
 * any destination-register update in @state so it is applied after the
 * step) or deliver SIGBUS.
 *
 * Returns the bundle to place in the single-step buffer: the original
 * bundle if no special handling is needed, a rewritten bundle with the
 * memory op replaced (prefetch, addi, or nop), or 0 if a signal was
 * generated and nothing should be executed.
 */
static tilepro_bundle_bits rewrite_load_store_unaligned(
		struct single_step_state *state,
		tilepro_bundle_bits bundle,
		struct pt_regs *regs,
		enum mem_op mem_op,
		int size, int sign_ext)
{
	unsigned char __user *addr;
	int val_reg, addr_reg, err, val;
	int align_ctl;

	/* Global policy first; per-task prctl() setting overrides it. */
	align_ctl = unaligned_fixup;
	switch (task_thread_info(current)->align_ctl) {
	case PR_UNALIGN_NOPRINT:
		align_ctl = 1;
		break;
	case PR_UNALIGN_SIGBUS:
		align_ctl = 0;
		break;
	}

	/* Get address and value registers */
	if (bundle & TILEPRO_BUNDLE_Y_ENCODING_MASK) {
		/* Y-format: the memory op lives in the Y2 pipe. */
		addr_reg = get_SrcA_Y2(bundle);
		val_reg = get_SrcBDest_Y2(bundle);
	} else if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) {
		addr_reg = get_SrcA_X1(bundle);
		val_reg = get_Dest_X1(bundle);
	} else {
		addr_reg = get_SrcA_X1(bundle);
		val_reg = get_SrcB_X1(bundle);
	}

	/*
	 * If registers are not GPRs, don't try to handle it.
	 *
	 * FIXME: we could handle non-GPR loads by getting the real value
	 * from memory, writing it to the single step buffer, using a
	 * temp_reg to hold a pointer to that memory, then executing that
	 * instruction and resetting temp_reg.  For non-GPR stores, it's a
	 * little trickier; we could use the single step buffer for that
	 * too, but we'd have to add some more state bits so that we could
	 * call back in here to copy that value to the real target.  For
	 * now, we just handle the simple case.
	 */
	if ((val_reg >= PTREGS_NR_GPRS &&
	     (val_reg != TREG_ZERO ||
	      mem_op == MEMOP_LOAD ||
	      mem_op == MEMOP_LOAD_POSTINCR)) ||
	    addr_reg >= PTREGS_NR_GPRS)
		return bundle;

	/* If it's aligned, don't handle it specially */
	addr = (void __user *)regs->regs[addr_reg];
	if (((unsigned long)addr % size) == 0)
		return bundle;

	/*
	 * Return SIGBUS with the unaligned address, if requested.
	 * Note that we return SIGBUS even for completely invalid addresses
	 * as long as they are in fact unaligned; this matches what the
	 * tilepro hardware would be doing, if it could provide us with the
	 * actual bad address in an SPR, which it doesn't.
	 */
	if (align_ctl == 0) {
		siginfo_t info = {
			.si_signo = SIGBUS,
			.si_code = BUS_ADRALN,
			.si_addr = addr
		};
		trace_unhandled_signal("unaligned trap", regs,
				       (unsigned long)addr, SIGBUS);
		force_sig_info(info.si_signo, &info, current);
		return (tilepro_bundle_bits) 0;
	}

	/* Handle unaligned load/store */
	if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) {
		unsigned short val_16;
		switch (size) {
		case 2:
			err = copy_from_user(&val_16, addr, sizeof(val_16));
			val = sign_ext ? ((short)val_16) : val_16;
			break;
		case 4:
			err = copy_from_user(&val, addr, sizeof(val));
			break;
		default:
			BUG();
		}
		if (err == 0) {
			/* Defer the register write until after the step. */
			state->update_reg = val_reg;
			state->update_value = val;
			state->update = 1;
		}
	} else {
		unsigned short val_16;
		/* TREG_ZERO has no pt_regs slot; it always reads as 0. */
		val = (val_reg == TREG_ZERO) ? 0 : regs->regs[val_reg];
		switch (size) {
		case 2:
			val_16 = val;
			err = copy_to_user(addr, &val_16, sizeof(val_16));
			break;
		case 4:
			err = copy_to_user(addr, &val, sizeof(val));
			break;
		default:
			BUG();
		}
	}

	/* A faulting emulation becomes SIGBUS, same as the policy case. */
	if (err) {
		siginfo_t info = {
			.si_signo = SIGBUS,
			.si_code = BUS_ADRALN,
			.si_addr = addr
		};
		trace_unhandled_signal("bad address for unaligned fixup", regs,
				       (unsigned long)addr, SIGBUS);
		force_sig_info(info.si_signo, &info, current);
		return (tilepro_bundle_bits) 0;
	}

	/* Warn once (or always, if the sysctl asks for it). */
	if (unaligned_printk || unaligned_fixup_count == 0) {
		pr_info("Process %d/%s: PC %#lx: Fixup of unaligned %s at %#lx\n",
			current->pid, current->comm, regs->pc,
			mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR ?
			"load" : "store",
			(unsigned long)addr);
		if (!unaligned_printk) {
#define P pr_info
P("\n");
P("Unaligned fixups in the kernel will slow your application considerably.\n");
P("To find them, write a \"1\" to /proc/sys/tile/unaligned_fixup/printk,\n");
P("which requests the kernel show all unaligned fixups, or write a \"0\"\n");
P("to /proc/sys/tile/unaligned_fixup/enabled, in which case each unaligned\n");
P("access will become a SIGBUS you can debug. No further warnings will be\n");
P("shown so as to avoid additional slowdown, but you can track the number\n");
P("of fixups performed via /proc/sys/tile/unaligned_fixup/count.\n");
P("Use the tile-addr2line command (see \"info addr2line\") to decode PCs.\n");
P("\n");
#undef P
		}
	}
	++unaligned_fixup_count;

	/* The access was emulated above; neuter the original memory op. */
	if (bundle & TILEPRO_BUNDLE_Y_ENCODING_MASK) {
		/* Convert the Y2 instruction to a prefetch. */
		bundle &= ~(create_SrcBDest_Y2(-1) |
			    create_Opcode_Y2(-1));
		bundle |= (create_SrcBDest_Y2(TREG_ZERO) |
			   create_Opcode_Y2(LW_OPCODE_Y2));
	/* Replace the load postincr with an addi */
	} else if (mem_op == MEMOP_LOAD_POSTINCR) {
		bundle = addi_X1(bundle, addr_reg, addr_reg,
				 get_Imm8_X1(bundle));
	/* Replace the store postincr with an addi */
	} else if (mem_op == MEMOP_STORE_POSTINCR) {
		bundle = addi_X1(bundle, addr_reg, addr_reg,
				 get_Dest_Imm8_X1(bundle));
	} else {
		/* Convert the X1 instruction to a nop. */
		bundle &= ~(create_Opcode_X1(-1) |
			    create_UnShOpcodeExtension_X1(-1) |
			    create_UnOpcodeExtension_X1(-1));
		bundle |= (create_Opcode_X1(SHUN_0_OPCODE_X1) |
			   create_UnShOpcodeExtension_X1(
				   UN_0_SHUN_0_OPCODE_X1) |
			   create_UnOpcodeExtension_X1(
				   NOP_UN_0_SHUN_0_OPCODE_X1));
	}

	return bundle;
}
  248. /*
  249. * Called after execve() has started the new image. This allows us
  250. * to reset the info state. Note that the the mmap'ed memory, if there
  251. * was any, has already been unmapped by the exec.
  252. */
  253. void single_step_execve(void)
  254. {
  255. struct thread_info *ti = current_thread_info();
  256. kfree(ti->step_state);
  257. ti->step_state = NULL;
  258. }
/*
 * single_step_once() - entry point when single stepping has been triggered.
 * @regs: The machine register state
 *
 * When we arrive at this routine via a trampoline, the single step
 * engine copies the executing bundle to the single step buffer.
 * If the instruction is a condition branch, then the target is
 * reset to one past the next instruction. If the instruction
 * sets the lr, then that is noted. If the instruction is a jump
 * or call, then the new target pc is preserved and the current
 * bundle instruction set to null.
 *
 * The necessary post-single-step rewriting information is stored in
 * single_step_state.  We use data segment values because the
 * stack will be rewound when we run the rewritten single-stepped
 * instruction.
 */
void single_step_once(struct pt_regs *regs)
{
	/* Template bundles defined by the asm() block below. */
	extern tilepro_bundle_bits __single_step_ill_insn;
	extern tilepro_bundle_bits __single_step_j_insn;
	extern tilepro_bundle_bits __single_step_addli_insn;
	extern tilepro_bundle_bits __single_step_auli_insn;
	struct thread_info *info = (void *)current_thread_info();
	struct single_step_state *state = info->step_state;
	int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
	tilepro_bundle_bits __user *buffer, *pc;
	tilepro_bundle_bits bundle;
	int temp_reg;
	int target_reg = TREG_LR;	/* register a link-style insn writes */
	int err;
	enum mem_op mem_op = MEMOP_NONE;
	int size = 0, sign_ext = 0;	/* happy compiler */
	int align_ctl;

	/* Global unaligned-fixup policy, overridden by per-task prctl(). */
	align_ctl = unaligned_fixup;
	switch (task_thread_info(current)->align_ctl) {
	case PR_UNALIGN_NOPRINT:
		align_ctl = 1;
		break;
	case PR_UNALIGN_SIGBUS:
		align_ctl = 0;
		break;
	}

	/* Reference patterns we patch and copy into the step buffer. */
	asm(
	" .pushsection .rodata.single_step\n"
	" .align 8\n"
	" .globl __single_step_ill_insn\n"
	"__single_step_ill_insn:\n"
	" ill\n"
	" .globl __single_step_addli_insn\n"
	"__single_step_addli_insn:\n"
	" { nop; addli r0, zero, 0 }\n"
	" .globl __single_step_auli_insn\n"
	"__single_step_auli_insn:\n"
	" { nop; auli r0, r0, 0 }\n"
	" .globl __single_step_j_insn\n"
	"__single_step_j_insn:\n"
	" j .\n"
	" .popsection\n"
	);

	/*
	 * Enable interrupts here to allow touching userspace and the like.
	 * The callers expect this: do_trap() already has interrupts
	 * enabled, and do_work_pending() handles functions that enable
	 * interrupts internally.
	 */
	local_irq_enable();

	if (state == NULL) {
		/* allocate the per-thread single-step state structure */
		state = kmalloc(sizeof(struct single_step_state), GFP_KERNEL);
		if (state == NULL) {
			pr_err("Out of kernel memory trying to single-step\n");
			return;
		}

		/* allocate a cache line of writable, executable memory */
		buffer = (void __user *) vm_mmap(NULL, 0, 64,
					  PROT_EXEC | PROT_READ | PROT_WRITE,
					  MAP_PRIVATE | MAP_ANONYMOUS,
					  0);
		if (IS_ERR((void __force *)buffer)) {
			kfree(state);
			pr_err("Out of kernel pages trying to single-step\n");
			return;
		}

		state->buffer = buffer;
		state->is_enabled = 0;
		info->step_state = state;

		/* Validate our stored instruction patterns */
		BUG_ON(get_Opcode_X1(__single_step_addli_insn) !=
		       ADDLI_OPCODE_X1);
		BUG_ON(get_Opcode_X1(__single_step_auli_insn) !=
		       AULI_OPCODE_X1);
		BUG_ON(get_SrcA_X1(__single_step_addli_insn) != TREG_ZERO);
		BUG_ON(get_Dest_X1(__single_step_addli_insn) != 0);
		BUG_ON(get_JOffLong_X1(__single_step_j_insn) != 0);
	}

	/*
	 * If we are returning from a syscall, we still haven't hit the
	 * "ill" for the swint1 instruction.  So back the PC up to be
	 * pointing at the swint1, but we'll actually return directly
	 * back to the "ill" so we come back in via SIGILL as if we
	 * had "executed" the swint1 without ever being in kernel space.
	 */
	if (regs->faultnum == INT_SWINT_1)
		regs->pc -= 8;

	pc = (tilepro_bundle_bits __user *)(regs->pc);
	if (get_user(bundle, pc) != 0) {
		pr_err("Couldn't read instruction at %p trying to step\n", pc);
		return;
	}

	/* We'll follow the instruction with 2 ill op bundles */
	state->orig_pc = (unsigned long)pc;
	state->next_pc = (unsigned long)(pc + 1);
	state->branch_next_pc = 0;
	state->update = 0;

	if (!(bundle & TILEPRO_BUNDLE_Y_ENCODING_MASK)) {
		/* two wide, check for control flow */
		int opcode = get_Opcode_X1(bundle);

		switch (opcode) {
		/* branches */
		case BRANCH_OPCODE_X1:
		{
			s32 offset = signExtend17(get_BrOff_X1(bundle));

			/*
			 * For branches, we use a rewriting trick to let the
			 * hardware evaluate whether the branch is taken or
			 * untaken.  We record the target offset and then
			 * rewrite the branch instruction to target 1 insn
			 * ahead if the branch is taken.  We then follow the
			 * rewritten branch with two bundles, each containing
			 * an "ill" instruction.  The supervisor examines the
			 * pc after the single step code is executed, and if
			 * the pc is the first ill instruction, then the
			 * branch (if any) was not taken.  If the pc is the
			 * second ill instruction, then the branch was
			 * taken.  The new pc is computed for these cases, and
			 * inserted into the registers for the thread.  If
			 * the pc is the start of the single step code, then
			 * an exception or interrupt was taken before the
			 * code started processing, and the same "original"
			 * pc is restored.  This change, different from the
			 * original implementation, has the advantage of
			 * executing a single user instruction.
			 */
			state->branch_next_pc = (unsigned long)(pc + offset);

			/* rewrite branch offset to go forward one bundle */
			bundle = set_BrOff_X1(bundle, 2);
		}
		break;

		/* jumps */
		case JALB_OPCODE_X1:
		case JALF_OPCODE_X1:
			state->update = 1;
			state->next_pc =
				(unsigned long) (pc + get_JOffLong_X1(bundle));
			break;

		case JB_OPCODE_X1:
		case JF_OPCODE_X1:
			state->next_pc =
				(unsigned long) (pc + get_JOffLong_X1(bundle));
			bundle = nop_X1(bundle);
			break;

		case SPECIAL_0_OPCODE_X1:
			switch (get_RRROpcodeExtension_X1(bundle)) {
			/* jump-register */
			case JALRP_SPECIAL_0_OPCODE_X1:
			case JALR_SPECIAL_0_OPCODE_X1:
				state->update = 1;
				state->next_pc =
					regs->regs[get_SrcA_X1(bundle)];
				break;

			case JRP_SPECIAL_0_OPCODE_X1:
			case JR_SPECIAL_0_OPCODE_X1:
				state->next_pc =
					regs->regs[get_SrcA_X1(bundle)];
				bundle = nop_X1(bundle);
				break;

			case LNK_SPECIAL_0_OPCODE_X1:
				state->update = 1;
				target_reg = get_Dest_X1(bundle);
				break;

			/* stores */
			case SH_SPECIAL_0_OPCODE_X1:
				mem_op = MEMOP_STORE;
				size = 2;
				break;

			case SW_SPECIAL_0_OPCODE_X1:
				mem_op = MEMOP_STORE;
				size = 4;
				break;
			}
			break;

		/* loads and iret */
		case SHUN_0_OPCODE_X1:
			if (get_UnShOpcodeExtension_X1(bundle) ==
			    UN_0_SHUN_0_OPCODE_X1) {
				switch (get_UnOpcodeExtension_X1(bundle)) {
				case LH_UN_0_SHUN_0_OPCODE_X1:
					mem_op = MEMOP_LOAD;
					size = 2;
					sign_ext = 1;
					break;

				case LH_U_UN_0_SHUN_0_OPCODE_X1:
					mem_op = MEMOP_LOAD;
					size = 2;
					sign_ext = 0;
					break;

				case LW_UN_0_SHUN_0_OPCODE_X1:
					mem_op = MEMOP_LOAD;
					size = 4;
					break;

				case IRET_UN_0_SHUN_0_OPCODE_X1:
				{
					unsigned long ex0_0 = __insn_mfspr(
						SPR_EX_CONTEXT_0_0);
					unsigned long ex0_1 = __insn_mfspr(
						SPR_EX_CONTEXT_0_1);
					/*
					 * Special-case it if we're iret'ing
					 * to PL0 again.  Otherwise just let
					 * it run and it will generate SIGILL.
					 */
					if (EX1_PL(ex0_1) == USER_PL) {
						state->next_pc = ex0_0;
						regs->ex1 = ex0_1;
						bundle = nop_X1(bundle);
					}
				}
				}
			}
			break;

		/* postincrement operations */
		case IMM_0_OPCODE_X1:
			switch (get_ImmOpcodeExtension_X1(bundle)) {
			case LWADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_LOAD_POSTINCR;
				size = 4;
				break;

			case LHADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_LOAD_POSTINCR;
				size = 2;
				sign_ext = 1;
				break;

			case LHADD_U_IMM_0_OPCODE_X1:
				mem_op = MEMOP_LOAD_POSTINCR;
				size = 2;
				sign_ext = 0;
				break;

			case SWADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_STORE_POSTINCR;
				size = 4;
				break;

			case SHADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_STORE_POSTINCR;
				size = 2;
				break;

			default:
				break;
			}
			break;
		}

		if (state->update) {
			/*
			 * Get an available register.  We start with a
			 * bitmask with 1's for available registers.
			 * We truncate to the low 32 registers since
			 * we are guaranteed to have set bits in the
			 * low 32 bits, then use ctz to pick the first.
			 */
			u32 mask = (u32) ~((1ULL << get_Dest_X0(bundle)) |
					   (1ULL << get_SrcA_X0(bundle)) |
					   (1ULL << get_SrcB_X0(bundle)) |
					   (1ULL << target_reg));
			temp_reg = __builtin_ctz(mask);
			/* Stash the old value so it can be restored later. */
			state->update_reg = temp_reg;
			state->update_value = regs->regs[temp_reg];
			regs->regs[temp_reg] = (unsigned long) (pc+1);
			regs->flags |= PT_FLAGS_RESTORE_REGS;
			bundle = move_X1(bundle, target_reg, temp_reg);
		}
	} else {
		int opcode = get_Opcode_Y2(bundle);

		switch (opcode) {
		/* loads */
		case LH_OPCODE_Y2:
			mem_op = MEMOP_LOAD;
			size = 2;
			sign_ext = 1;
			break;

		case LH_U_OPCODE_Y2:
			mem_op = MEMOP_LOAD;
			size = 2;
			sign_ext = 0;
			break;

		case LW_OPCODE_Y2:
			mem_op = MEMOP_LOAD;
			size = 4;
			break;

		/* stores */
		case SH_OPCODE_Y2:
			mem_op = MEMOP_STORE;
			size = 2;
			break;

		case SW_OPCODE_Y2:
			mem_op = MEMOP_STORE;
			size = 4;
			break;
		}
	}

	/*
	 * Check if we need to rewrite an unaligned load/store.
	 * Returning zero is a special value meaning we generated a signal.
	 */
	if (mem_op != MEMOP_NONE && align_ctl >= 0) {
		bundle = rewrite_load_store_unaligned(state, bundle, regs,
						      mem_op, size, sign_ext);
		if (bundle == 0)
			return;
	}

	/* write the bundle to our execution area */
	buffer = state->buffer;
	err = __put_user(bundle, buffer++);

	/*
	 * If we're really single-stepping, we take an INT_ILL after.
	 * If we're just handling an unaligned access, we can just
	 * jump directly back to where we were in user code.
	 */
	if (is_single_step) {
		err |= __put_user(__single_step_ill_insn, buffer++);
		err |= __put_user(__single_step_ill_insn, buffer++);
	} else {
		long delta;

		if (state->update) {
			/* We have some state to update; do it inline */
			int ha16;
			/* addli reg, zero, lo16(value) */
			bundle = __single_step_addli_insn;
			bundle |= create_Dest_X1(state->update_reg);
			bundle |= create_Imm16_X1(state->update_value);
			err |= __put_user(bundle, buffer++);
			/* auli reg, reg, ha16(value) completes the 32 bits */
			bundle = __single_step_auli_insn;
			bundle |= create_Dest_X1(state->update_reg);
			bundle |= create_SrcA_X1(state->update_reg);
			ha16 = (state->update_value + 0x8000) >> 16;
			bundle |= create_Imm16_X1(ha16);
			err |= __put_user(bundle, buffer++);
			state->update = 0;
		}

		/* End with a jump back to the next instruction */
		delta = ((regs->pc + TILEPRO_BUNDLE_SIZE_IN_BYTES) -
			(unsigned long)buffer) >>
			TILEPRO_LOG2_BUNDLE_ALIGNMENT_IN_BYTES;
		bundle = __single_step_j_insn;
		bundle |= create_JOffLong_X1(delta);
		err |= __put_user(bundle, buffer++);
	}

	if (err) {
		pr_err("Fault when writing to single-step buffer\n");
		return;
	}

	/*
	 * Flush the buffer.
	 * We do a local flush only, since this is a thread-specific buffer.
	 */
	__flush_icache_range((unsigned long)state->buffer,
			     (unsigned long)buffer);

	/* Indicate enabled */
	state->is_enabled = is_single_step;
	regs->pc = (unsigned long)state->buffer;

	/* Fault immediately if we are coming back from a syscall. */
	if (regs->faultnum == INT_SWINT_1)
		regs->pc += 8;
}
  631. #else
  632. static DEFINE_PER_CPU(unsigned long, ss_saved_pc);
  633. /*
  634. * Called directly on the occasion of an interrupt.
  635. *
  636. * If the process doesn't have single step set, then we use this as an
  637. * opportunity to turn single step off.
  638. *
  639. * It has been mentioned that we could conditionally turn off single stepping
  640. * on each entry into the kernel and rely on single_step_once to turn it
  641. * on for the processes that matter (as we already do), but this
  642. * implementation is somewhat more efficient in that we muck with registers
  643. * once on a bum interrupt rather than on every entry into the kernel.
  644. *
  645. * If SINGLE_STEP_CONTROL_K has CANCELED set, then an interrupt occurred,
  646. * so we have to run through this process again before we can say that an
  647. * instruction has executed.
  648. *
  649. * swint will set CANCELED, but it's a legitimate instruction. Fortunately
  650. * it changes the PC. If it hasn't changed, then we know that the interrupt
  651. * wasn't generated by swint and we'll need to run this process again before
  652. * we can say an instruction has executed.
  653. *
  654. * If either CANCELED == 0 or the PC's changed, we send out SIGTRAPs and get
  655. * on with our lives.
  656. */
  657. void gx_singlestep_handle(struct pt_regs *regs, int fault_num)
  658. {
  659. enum ctx_state prev_state = exception_enter();
  660. unsigned long *ss_pc = this_cpu_ptr(&ss_saved_pc);
  661. struct thread_info *info = (void *)current_thread_info();
  662. int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
  663. unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);
  664. if (is_single_step == 0) {
  665. __insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 0);
  666. } else if ((*ss_pc != regs->pc) ||
  667. (!(control & SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK))) {
  668. control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
  669. control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
  670. __insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
  671. send_sigtrap(current, regs);
  672. }
  673. exception_exit(prev_state);
  674. }
  675. /*
  676. * Called from need_singlestep. Set up the control registers and the enable
  677. * register, then return back.
  678. */
  679. void single_step_once(struct pt_regs *regs)
  680. {
  681. unsigned long *ss_pc = this_cpu_ptr(&ss_saved_pc);
  682. unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);
  683. *ss_pc = regs->pc;
  684. control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
  685. control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
  686. __insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
  687. __insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 1 << USER_PL);
  688. }
/*
 * No cleanup needed on tilegx: this path keeps its stepping state in
 * SPRs and the per-cpu ss_saved_pc, not in per-thread allocations.
 */
void single_step_execve(void)
{
	/* Nothing */
}
  693. #endif /* !__tilegx__ */