process_64.c

/* arch/sparc64/kernel/process.c
 *
 * Copyright (C) 1995, 1996, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1997, 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <stdarg.h>

#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/delay.h>
#include <linux/compat.h>
#include <linux/tick.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/perf_event.h>
#include <linux/elfcore.h>
#include <linux/sysrq.h>
#include <linux/nmi.h>
#include <linux/context_tracking.h>

#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/pstate.h>
#include <asm/elf.h>
#include <asm/fpumacro.h>
#include <asm/head.h>
#include <asm/cpudata.h>
#include <asm/mmu_context.h>
#include <asm/unistd.h>
#include <asm/hypervisor.h>
#include <asm/syscalls.h>
#include <asm/irq_regs.h>
#include <asm/smp.h>
#include <asm/pcr.h>

#include "kstack.h"
/* Idle loop support on sparc64. */
void arch_cpu_idle(void)
{
	if (tlb_type != hypervisor) {
		touch_nmi_watchdog();
		local_irq_enable();
	} else {
		unsigned long pstate;

		local_irq_enable();

		/* The sun4v sleeping code requires that we have PSTATE.IE cleared over
		 * the cpu sleep hypervisor call.
		 */
		__asm__ __volatile__(
			"rdpr %%pstate, %0\n\t"
			"andn %0, %1, %0\n\t"
			"wrpr %0, %%g0, %%pstate"
			: "=&r" (pstate)
			: "i" (PSTATE_IE));

		if (!need_resched() && !cpu_is_offline(smp_processor_id()))
			sun4v_cpu_yield();

		/* Re-enable interrupts. */
		__asm__ __volatile__(
			"rdpr %%pstate, %0\n\t"
			"or %0, %1, %0\n\t"
			"wrpr %0, %%g0, %%pstate"
			: "=&r" (pstate)
			: "i" (PSTATE_IE));
	}
}
#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
	sched_preempt_enable_no_resched();
	cpu_play_dead();
}
#endif
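/* Compat variant of the register-window dump: for 32-bit tasks the window
 * saved at %o6 (u_regs[14]) is an array of 32-bit registers fetched from
 * user memory, so the copy may fail and the dump is then silently skipped.
 */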
#ifdef CONFIG_COMPAT
static void show_regwindow32(struct pt_regs *regs)
{
	struct reg_window32 __user *rw;
	struct reg_window32 r_w;
	mm_segment_t old_fs;

	__asm__ __volatile__ ("flushw");
	rw = compat_ptr((unsigned)regs->u_regs[14]);
	old_fs = get_fs();
	set_fs (USER_DS);
	if (copy_from_user (&r_w, rw, sizeof(r_w))) {
		set_fs (old_fs);
		return;
	}

	set_fs (old_fs);
	printk("l0: %08x l1: %08x l2: %08x l3: %08x "
	       "l4: %08x l5: %08x l6: %08x l7: %08x\n",
	       r_w.locals[0], r_w.locals[1], r_w.locals[2], r_w.locals[3],
	       r_w.locals[4], r_w.locals[5], r_w.locals[6], r_w.locals[7]);
	printk("i0: %08x i1: %08x i2: %08x i3: %08x "
	       "i4: %08x i5: %08x i6: %08x i7: %08x\n",
	       r_w.ins[0], r_w.ins[1], r_w.ins[2], r_w.ins[3],
	       r_w.ins[4], r_w.ins[5], r_w.ins[6], r_w.ins[7]);
}
#else
#define show_regwindow32(regs)	do { } while (0)
#endif
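/* Dump the register window saved at the frame pointer of the faulting
 * context: read directly off the kernel stack for privileged traps, via
 * copy_from_user() for 64-bit user tasks, and through the compat helper
 * above for 32-bit user tasks.
 */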
static void show_regwindow(struct pt_regs *regs)
{
	struct reg_window __user *rw;
	struct reg_window *rwk;
	struct reg_window r_w;
	mm_segment_t old_fs;

	if ((regs->tstate & TSTATE_PRIV) || !(test_thread_flag(TIF_32BIT))) {
		__asm__ __volatile__ ("flushw");
		rw = (struct reg_window __user *)
			(regs->u_regs[14] + STACK_BIAS);
		rwk = (struct reg_window *)
			(regs->u_regs[14] + STACK_BIAS);
		if (!(regs->tstate & TSTATE_PRIV)) {
			old_fs = get_fs();
			set_fs (USER_DS);
			if (copy_from_user (&r_w, rw, sizeof(r_w))) {
				set_fs (old_fs);
				return;
			}
			rwk = &r_w;
			set_fs (old_fs);
		}
	} else {
		show_regwindow32(regs);
		return;
	}

	printk("l0: %016lx l1: %016lx l2: %016lx l3: %016lx\n",
	       rwk->locals[0], rwk->locals[1], rwk->locals[2], rwk->locals[3]);
	printk("l4: %016lx l5: %016lx l6: %016lx l7: %016lx\n",
	       rwk->locals[4], rwk->locals[5], rwk->locals[6], rwk->locals[7]);
	printk("i0: %016lx i1: %016lx i2: %016lx i3: %016lx\n",
	       rwk->ins[0], rwk->ins[1], rwk->ins[2], rwk->ins[3]);
	printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
	       rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
	if (regs->tstate & TSTATE_PRIV)
		printk("I7: <%pS>\n", (void *) rwk->ins[7]);
}
void show_regs(struct pt_regs *regs)
{
	show_regs_print_info(KERN_DEFAULT);

	printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
	       regs->tpc, regs->tnpc, regs->y, print_tainted());
	printk("TPC: <%pS>\n", (void *) regs->tpc);
	printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
	       regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
	       regs->u_regs[3]);
	printk("g4: %016lx g5: %016lx g6: %016lx g7: %016lx\n",
	       regs->u_regs[4], regs->u_regs[5], regs->u_regs[6],
	       regs->u_regs[7]);
	printk("o0: %016lx o1: %016lx o2: %016lx o3: %016lx\n",
	       regs->u_regs[8], regs->u_regs[9], regs->u_regs[10],
	       regs->u_regs[11]);
	printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
	       regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
	       regs->u_regs[15]);
	printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
	show_regwindow(regs);
	show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
}
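/* Scratch area for the sysrq backtrace and PMU dump code below: each CPU
 * deposits a snapshot of its registers (or PMU counters) here, and the
 * triggering CPU prints the entries while holding global_cpu_snapshot_lock.
 */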
union global_cpu_snapshot global_cpu_snapshot[NR_CPUS];
static DEFINE_SPINLOCK(global_cpu_snapshot_lock);

static void __global_reg_self(struct thread_info *tp, struct pt_regs *regs,
			      int this_cpu)
{
	struct global_reg_snapshot *rp;

	flushw_all();

	rp = &global_cpu_snapshot[this_cpu].reg;

	rp->tstate = regs->tstate;
	rp->tpc = regs->tpc;
	rp->tnpc = regs->tnpc;
	rp->o7 = regs->u_regs[UREG_I7];

	if (regs->tstate & TSTATE_PRIV) {
		struct reg_window *rw;

		rw = (struct reg_window *)
			(regs->u_regs[UREG_FP] + STACK_BIAS);
		if (kstack_valid(tp, (unsigned long) rw)) {
			rp->i7 = rw->ins[7];
			rw = (struct reg_window *)
				(rw->ins[6] + STACK_BIAS);
			if (kstack_valid(tp, (unsigned long) rw))
				rp->rpc = rw->ins[7];
		}
	} else {
		rp->i7 = 0;
		rp->rpc = 0;
	}
	rp->thread = tp;
}
/* In order to avoid hangs we do not try to synchronize with the
 * global register dump client cpus.  The last store they make is to
 * the thread pointer, so do a short poll waiting for that to become
 * non-NULL.
 */
static void __global_reg_poll(struct global_reg_snapshot *gp)
{
	int limit = 0;

	while (!gp->thread && ++limit < 100) {
		barrier();
		udelay(1);
	}
}
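/* Capture and print the register state of every online CPU.  The local CPU
 * snapshots itself directly (when include_self is set); the others are asked
 * to do so via smp_fetch_global_regs() and are then polled for completion
 * before their entries are printed.
 */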
void arch_trigger_all_cpu_backtrace(bool include_self)
{
	struct thread_info *tp = current_thread_info();
	struct pt_regs *regs = get_irq_regs();
	unsigned long flags;
	int this_cpu, cpu;

	if (!regs)
		regs = tp->kregs;

	spin_lock_irqsave(&global_cpu_snapshot_lock, flags);

	this_cpu = raw_smp_processor_id();

	memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));

	if (include_self)
		__global_reg_self(tp, regs, this_cpu);

	smp_fetch_global_regs();

	for_each_online_cpu(cpu) {
		struct global_reg_snapshot *gp;

		if (!include_self && cpu == this_cpu)
			continue;

		gp = &global_cpu_snapshot[cpu].reg;

		__global_reg_poll(gp);

		tp = gp->thread;
		printk("%c CPU[%3d]: TSTATE[%016lx] TPC[%016lx] TNPC[%016lx] TASK[%s:%d]\n",
		       (cpu == this_cpu ? '*' : ' '), cpu,
		       gp->tstate, gp->tpc, gp->tnpc,
		       ((tp && tp->task) ? tp->task->comm : "NULL"),
		       ((tp && tp->task) ? tp->task->pid : -1));

		if (gp->tstate & TSTATE_PRIV) {
			printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
			       (void *) gp->tpc,
			       (void *) gp->o7,
			       (void *) gp->i7,
			       (void *) gp->rpc);
		} else {
			printk(" TPC[%lx] O7[%lx] I7[%lx] RPC[%lx]\n",
			       gp->tpc, gp->o7, gp->i7, gp->rpc);
		}
	}

	memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));

	spin_unlock_irqrestore(&global_cpu_snapshot_lock, flags);
}
#ifdef CONFIG_MAGIC_SYSRQ

static void sysrq_handle_globreg(int key)
{
	arch_trigger_all_cpu_backtrace(true);
}

static struct sysrq_key_op sparc_globalreg_op = {
	.handler	= sysrq_handle_globreg,
	.help_msg	= "global-regs(y)",
	.action_msg	= "Show Global CPU Regs",
};

static void __global_pmu_self(int this_cpu)
{
	struct global_pmu_snapshot *pp;
	int i, num;

	if (!pcr_ops)
		return;

	pp = &global_cpu_snapshot[this_cpu].pmu;

	num = 1;
	if (tlb_type == hypervisor &&
	    sun4v_chip_type >= SUN4V_CHIP_NIAGARA4)
		num = 4;

	for (i = 0; i < num; i++) {
		pp->pcr[i] = pcr_ops->read_pcr(i);
		pp->pic[i] = pcr_ops->read_pic(i);
	}
}

static void __global_pmu_poll(struct global_pmu_snapshot *pp)
{
	int limit = 0;

	while (!pp->pcr[0] && ++limit < 100) {
		barrier();
		udelay(1);
	}
}

static void pmu_snapshot_all_cpus(void)
{
	unsigned long flags;
	int this_cpu, cpu;

	spin_lock_irqsave(&global_cpu_snapshot_lock, flags);

	memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));

	this_cpu = raw_smp_processor_id();

	__global_pmu_self(this_cpu);

	smp_fetch_global_pmu();

	for_each_online_cpu(cpu) {
		struct global_pmu_snapshot *pp = &global_cpu_snapshot[cpu].pmu;

		__global_pmu_poll(pp);

		printk("%c CPU[%3d]: PCR[%08lx:%08lx:%08lx:%08lx] PIC[%08lx:%08lx:%08lx:%08lx]\n",
		       (cpu == this_cpu ? '*' : ' '), cpu,
		       pp->pcr[0], pp->pcr[1], pp->pcr[2], pp->pcr[3],
		       pp->pic[0], pp->pic[1], pp->pic[2], pp->pic[3]);
	}

	memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));

	spin_unlock_irqrestore(&global_cpu_snapshot_lock, flags);
}

static void sysrq_handle_globpmu(int key)
{
	pmu_snapshot_all_cpus();
}

static struct sysrq_key_op sparc_globalpmu_op = {
	.handler	= sysrq_handle_globpmu,
	.help_msg	= "global-pmu(x)",
	.action_msg	= "Show Global PMU Regs",
};

static int __init sparc_sysrq_init(void)
{
	int ret = register_sysrq_key('y', &sparc_globalreg_op);

	if (!ret)
		ret = register_sysrq_key('x', &sparc_globalpmu_op);

	return ret;
}

core_initcall(sparc_sysrq_init);

#endif
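/* Best-effort guess at the saved program counter of a sleeping task: follow
 * the task's saved kernel stack pointer into its caller's register window
 * and pick out the return address, or return 0xdeadbeef if the walk looks
 * bogus.
 */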
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	struct thread_info *ti = task_thread_info(tsk);
	unsigned long ret = 0xdeadbeefUL;

	if (ti && ti->ksp) {
		unsigned long *sp;
		sp = (unsigned long *)(ti->ksp + STACK_BIAS);
		if (((unsigned long)sp & (sizeof(long) - 1)) == 0UL &&
		    sp[14]) {
			unsigned long *fp;
			fp = (unsigned long *)(sp[14] + STACK_BIAS);
			if (((unsigned long)fp & (sizeof(long) - 1)) == 0UL)
				ret = fp[15];
		}
	}
	return ret;
}
/* Free current thread data structures etc. */
void exit_thread(void)
{
	struct thread_info *t = current_thread_info();

	if (t->utraps) {
		if (t->utraps[0] < 2)
			kfree (t->utraps);
		else
			t->utraps[0]--;
	}
}
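/* Called when a task starts running a fresh executable image: reload the
 * TSB state for the current mm, discard any user register windows buffered
 * in thread_info, and clear the saved FPU state.
 */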
void flush_thread(void)
{
	struct thread_info *t = current_thread_info();
	struct mm_struct *mm;

	mm = t->task->mm;
	if (mm)
		tsb_context_switch(mm);

	set_thread_wsaved(0);

	/* Clear FPU register state. */
	t->fpsaved[0] = 0;
}
/* It's a bit more tricky when 64-bit tasks are involved... */
static unsigned long clone_stackframe(unsigned long csp, unsigned long psp)
{
	bool stack_64bit = test_thread_64bit_stack(psp);
	unsigned long fp, distance, rval;

	if (stack_64bit) {
		csp += STACK_BIAS;
		psp += STACK_BIAS;
		__get_user(fp, &(((struct reg_window __user *)psp)->ins[6]));
		fp += STACK_BIAS;
		if (test_thread_flag(TIF_32BIT))
			fp &= 0xffffffff;
	} else
		__get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6]));

	/* Now align the stack as this is mandatory in the Sparc ABI
	 * due to how register windows work.  This hides the
	 * restriction from thread libraries etc.
	 */
	csp &= ~15UL;

	distance = fp - psp;
	rval = (csp - distance);
	if (copy_in_user((void __user *) rval, (void __user *) psp, distance))
		rval = 0;
	else if (!stack_64bit) {
		if (put_user(((u32)csp),
			     &(((struct reg_window32 __user *)rval)->ins[6])))
			rval = 0;
	} else {
		if (put_user(((u64)csp - STACK_BIAS),
			     &(((struct reg_window __user *)rval)->ins[6])))
			rval = 0;
		else
			rval = rval - STACK_BIAS;
	}

	return rval;
}
/* Standard stuff. */
static inline void shift_window_buffer(int first_win, int last_win,
				       struct thread_info *t)
{
	int i;

	for (i = first_win; i < last_win; i++) {
		t->rwbuf_stkptrs[i] = t->rwbuf_stkptrs[i+1];
		memcpy(&t->reg_window[i], &t->reg_window[i+1],
		       sizeof(struct reg_window));
	}
}
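/* Try to push the register windows buffered in thread_info back out to the
 * user stack.  Windows that cannot be written back simply stay buffered;
 * unlike fault_in_user_windows() below, a failure here is not fatal.
 */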
void synchronize_user_stack(void)
{
	struct thread_info *t = current_thread_info();
	unsigned long window;

	flush_user_windows();
	if ((window = get_thread_wsaved()) != 0) {
		window -= 1;
		do {
			struct reg_window *rwin = &t->reg_window[window];
			int winsize = sizeof(struct reg_window);
			unsigned long sp;

			sp = t->rwbuf_stkptrs[window];

			if (test_thread_64bit_stack(sp))
				sp += STACK_BIAS;
			else
				winsize = sizeof(struct reg_window32);

			if (!copy_to_user((char __user *)sp, rwin, winsize)) {
				shift_window_buffer(window, get_thread_wsaved() - 1, t);
				set_thread_wsaved(get_thread_wsaved() - 1);
			}
		} while (window--);
	}
}
static void stack_unaligned(unsigned long sp)
{
	siginfo_t info;

	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRALN;
	info.si_addr = (void __user *) sp;
	info.si_trapno = 0;
	force_sig_info(SIGBUS, &info, current);
}
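/* Flush every buffered user register window out to the user stack.  A
 * misaligned stack pointer raises SIGBUS, and a window that cannot be
 * written back at all kills the task with SIGILL.
 */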
void fault_in_user_windows(void)
{
	struct thread_info *t = current_thread_info();
	unsigned long window;

	flush_user_windows();
	window = get_thread_wsaved();

	if (likely(window != 0)) {
		window -= 1;
		do {
			struct reg_window *rwin = &t->reg_window[window];
			int winsize = sizeof(struct reg_window);
			unsigned long sp;

			sp = t->rwbuf_stkptrs[window];

			if (test_thread_64bit_stack(sp))
				sp += STACK_BIAS;
			else
				winsize = sizeof(struct reg_window32);

			if (unlikely(sp & 0x7UL))
				stack_unaligned(sp);

			if (unlikely(copy_to_user((char __user *)sp,
						  rwin, winsize)))
				goto barf;
		} while (window--);
	}
	set_thread_wsaved(0);
	return;

barf:
	set_thread_wsaved(window + 1);
	user_exit();
	do_exit(SIGILL);
}
asmlinkage long sparc_do_fork(unsigned long clone_flags,
			      unsigned long stack_start,
			      struct pt_regs *regs,
			      unsigned long stack_size)
{
	int __user *parent_tid_ptr, *child_tid_ptr;
	unsigned long orig_i1 = regs->u_regs[UREG_I1];
	long ret;

#ifdef CONFIG_COMPAT
	if (test_thread_flag(TIF_32BIT)) {
		parent_tid_ptr = compat_ptr(regs->u_regs[UREG_I2]);
		child_tid_ptr = compat_ptr(regs->u_regs[UREG_I4]);
	} else
#endif
	{
		parent_tid_ptr = (int __user *) regs->u_regs[UREG_I2];
		child_tid_ptr = (int __user *) regs->u_regs[UREG_I4];
	}

	ret = do_fork(clone_flags, stack_start, stack_size,
		      parent_tid_ptr, child_tid_ptr);

	/* If we get an error and potentially restart the system
	 * call, we're screwed because copy_thread() clobbered
	 * the parent's %o1.  So detect that case and restore it
	 * here.
	 */
	if ((unsigned long)ret >= -ERESTART_RESTARTBLOCK)
		regs->u_regs[UREG_I1] = orig_i1;

	return ret;
}
/* Copy a Sparc thread.  The fork() return value conventions
 * under SunOS are nothing short of bletcherous:
 * Parent --> %o0 == child's pid,  %o1 == 0
 * Child  --> %o0 == parent's pid, %o1 == 1
 */
int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long arg, struct task_struct *p)
{
	struct thread_info *t = task_thread_info(p);
	struct pt_regs *regs = current_pt_regs();
	struct sparc_stackf *parent_sf;
	unsigned long child_stack_sz;
	char *child_trap_frame;

	/* Calculate offset to stack_frame & pt_regs */
	child_stack_sz = (STACKFRAME_SZ + TRACEREG_SZ);
	child_trap_frame = (task_stack_page(p) +
			    (THREAD_SIZE - child_stack_sz));

	t->new_child = 1;
	t->ksp = ((unsigned long) child_trap_frame) - STACK_BIAS;
	t->kregs = (struct pt_regs *) (child_trap_frame +
				       sizeof(struct sparc_stackf));
	t->fpsaved[0] = 0;

	if (unlikely(p->flags & PF_KTHREAD)) {
		memset(child_trap_frame, 0, child_stack_sz);
		__thread_flag_byte_ptr(t)[TI_FLAG_BYTE_CWP] =
			(current_pt_regs()->tstate + 1) & TSTATE_CWP;
		t->current_ds = ASI_P;
		t->kregs->u_regs[UREG_G1] = sp; /* function */
		t->kregs->u_regs[UREG_G2] = arg;
		return 0;
	}

	parent_sf = ((struct sparc_stackf *) regs) - 1;
	memcpy(child_trap_frame, parent_sf, child_stack_sz);
	if (t->flags & _TIF_32BIT) {
		sp &= 0x00000000ffffffffUL;
		regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
	}
	t->kregs->u_regs[UREG_FP] = sp;
	__thread_flag_byte_ptr(t)[TI_FLAG_BYTE_CWP] =
		(regs->tstate + 1) & TSTATE_CWP;
	t->current_ds = ASI_AIUS;
	if (sp != regs->u_regs[UREG_FP]) {
		unsigned long csp;

		csp = clone_stackframe(sp, regs->u_regs[UREG_FP]);
		if (!csp)
			return -EFAULT;
		t->kregs->u_regs[UREG_FP] = csp;
	}
	if (t->utraps)
		t->utraps[0]++;

	/* Set the return value for the child. */
	t->kregs->u_regs[UREG_I0] = current->pid;
	t->kregs->u_regs[UREG_I1] = 1;

	/* Set the second return value for the parent. */
	regs->u_regs[UREG_I1] = 0;

	if (clone_flags & CLONE_SETTLS)
		t->kregs->u_regs[UREG_G7] = regs->u_regs[UREG_I3];

	return 0;
}
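/* Layout of the 32-bit (compat) FPU register set as it appears in an ELF
 * core dump; dump_fpu() below fills this in for TIF_32BIT tasks.
 */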
typedef struct {
	union {
		unsigned int pr_regs[32];
		unsigned long pr_dregs[16];
	} pr_fr;
	unsigned int __unused;
	unsigned int pr_fsr;
	unsigned char pr_qcnt;
	unsigned char pr_q_entrysize;
	unsigned char pr_en;
	unsigned int pr_q[64];
} elf_fpregset_t32;
/*
 * fill in the fpu structure for a core dump.
 */
int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs)
{
	unsigned long *kfpregs = current_thread_info()->fpregs;
	unsigned long fprs = current_thread_info()->fpsaved[0];

	if (test_thread_flag(TIF_32BIT)) {
		elf_fpregset_t32 *fpregs32 = (elf_fpregset_t32 *)fpregs;

		if (fprs & FPRS_DL)
			memcpy(&fpregs32->pr_fr.pr_regs[0], kfpregs,
			       sizeof(unsigned int) * 32);
		else
			memset(&fpregs32->pr_fr.pr_regs[0], 0,
			       sizeof(unsigned int) * 32);
		fpregs32->pr_qcnt = 0;
		fpregs32->pr_q_entrysize = 8;
		memset(&fpregs32->pr_q[0], 0,
		       (sizeof(unsigned int) * 64));
		if (fprs & FPRS_FEF) {
			fpregs32->pr_fsr = (unsigned int) current_thread_info()->xfsr[0];
			fpregs32->pr_en = 1;
		} else {
			fpregs32->pr_fsr = 0;
			fpregs32->pr_en = 0;
		}
	} else {
		if(fprs & FPRS_DL)
			memcpy(&fpregs->pr_regs[0], kfpregs,
			       sizeof(unsigned int) * 32);
		else
			memset(&fpregs->pr_regs[0], 0,
			       sizeof(unsigned int) * 32);
		if(fprs & FPRS_DU)
			memcpy(&fpregs->pr_regs[16], kfpregs+16,
			       sizeof(unsigned int) * 32);
		else
			memset(&fpregs->pr_regs[16], 0,
			       sizeof(unsigned int) * 32);
		if(fprs & FPRS_FEF) {
			fpregs->pr_fsr = current_thread_info()->xfsr[0];
			fpregs->pr_gsr = current_thread_info()->gsr[0];
		} else {
			fpregs->pr_fsr = fpregs->pr_gsr = 0;
		}
		fpregs->pr_fprs = fprs;
	}
	return 1;
}
EXPORT_SYMBOL(dump_fpu);
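/* Report the waiting "channel" of a sleeping task: walk up to 16 frames of
 * its kernel stack and return the first return address that is not inside
 * the scheduler itself, or 0 if nothing suitable is found.
 */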
unsigned long get_wchan(struct task_struct *task)
{
	unsigned long pc, fp, bias = 0;
	struct thread_info *tp;
	struct reg_window *rw;
	unsigned long ret = 0;
	int count = 0;

	if (!task || task == current ||
	    task->state == TASK_RUNNING)
		goto out;

	tp = task_thread_info(task);
	bias = STACK_BIAS;
	fp = task_thread_info(task)->ksp + bias;

	do {
		if (!kstack_valid(tp, fp))
			break;
		rw = (struct reg_window *) fp;
		pc = rw->ins[7];
		if (!in_sched_functions(pc)) {
			ret = pc;
			goto out;
		}
		fp = rw->ins[6] + bias;
	} while (++count < 16);

out:
	return ret;
}