
/* align.c - handle alignment exceptions for the Power PC.
 *
 * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
 * Copyright (c) 1998-1999 TiVo, Inc.
 *    PowerPC 403GCX modifications.
 * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
 *    PowerPC 403GCX/405GP modifications.
 * Copyright (c) 2001-2002 PPC64 team, IBM Corp
 *    64-bit and Power4 support
 * Copyright (c) 2005 Benjamin Herrenschmidt, IBM Corp
 *                    <benh@kernel.crashing.org>
 *    Merge ppc32 and ppc64 implementations
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/cache.h>
#include <asm/cputable.h>
#include <asm/emulated_ops.h>
#include <asm/switch_to.h>

struct aligninfo {
	unsigned char len;
	unsigned char flags;
};
#define IS_XFORM(inst)	(((inst) >> 26) == 31)
#define IS_DSFORM(inst)	(((inst) >> 26) >= 56)

#define INVALID	{ 0, 0 }

/* Bits in the flags field */
#define LD	0	/* load */
#define ST	1	/* store */
#define SE	2	/* sign-extend value, or FP ld/st as word */
#define F	4	/* to/from fp regs */
#define U	8	/* update index register */
#define M	0x10	/* multiple load/store */
#define SW	0x20	/* byte swap */
#define S	0x40	/* single-precision fp or... */
#define SX	0x40	/* ... byte count in XER */
#define HARD	0x80	/* string, stwcx. */
#define E4	0x40	/* SPE endianness is word */
#define E8	0x80	/* SPE endianness is double word */
#define SPLT	0x80	/* VSX SPLAT load */

/* DSISR bits reported for a DCBZ instruction: */
#define DCBZ	0x5f	/* 8xx/82xx dcbz faults when cache not enabled */

/*
 * The PowerPC stores certain bits of the instruction that caused the
 * alignment exception in the DSISR register.  This array maps those
 * bits to information about the operand length and what the
 * instruction would do.
 */
static struct aligninfo aligninfo[128] = {
	{ 4, LD },		/* 00 0 0000: lwz / lwarx */
	INVALID,		/* 00 0 0001 */
	{ 4, ST },		/* 00 0 0010: stw */
	INVALID,		/* 00 0 0011 */
	{ 2, LD },		/* 00 0 0100: lhz */
	{ 2, LD+SE },		/* 00 0 0101: lha */
	{ 2, ST },		/* 00 0 0110: sth */
	{ 4, LD+M },		/* 00 0 0111: lmw */
	{ 4, LD+F+S },		/* 00 0 1000: lfs */
	{ 8, LD+F },		/* 00 0 1001: lfd */
	{ 4, ST+F+S },		/* 00 0 1010: stfs */
	{ 8, ST+F },		/* 00 0 1011: stfd */
	{ 16, LD },		/* 00 0 1100: lq */
	{ 8, LD },		/* 00 0 1101: ld/ldu/lwa */
	INVALID,		/* 00 0 1110 */
	{ 8, ST },		/* 00 0 1111: std/stdu */
	{ 4, LD+U },		/* 00 1 0000: lwzu */
	INVALID,		/* 00 1 0001 */
	{ 4, ST+U },		/* 00 1 0010: stwu */
	INVALID,		/* 00 1 0011 */
	{ 2, LD+U },		/* 00 1 0100: lhzu */
	{ 2, LD+SE+U },		/* 00 1 0101: lhau */
	{ 2, ST+U },		/* 00 1 0110: sthu */
	{ 4, ST+M },		/* 00 1 0111: stmw */
	{ 4, LD+F+S+U },	/* 00 1 1000: lfsu */
	{ 8, LD+F+U },		/* 00 1 1001: lfdu */
	{ 4, ST+F+S+U },	/* 00 1 1010: stfsu */
	{ 8, ST+F+U },		/* 00 1 1011: stfdu */
	{ 16, LD+F },		/* 00 1 1100: lfdp */
	INVALID,		/* 00 1 1101 */
	{ 16, ST+F },		/* 00 1 1110: stfdp */
	INVALID,		/* 00 1 1111 */
	{ 8, LD },		/* 01 0 0000: ldx */
	INVALID,		/* 01 0 0001 */
	{ 8, ST },		/* 01 0 0010: stdx */
	INVALID,		/* 01 0 0011 */
	INVALID,		/* 01 0 0100 */
	{ 4, LD+SE },		/* 01 0 0101: lwax */
	INVALID,		/* 01 0 0110 */
	INVALID,		/* 01 0 0111 */
	{ 4, LD+M+HARD+SX },	/* 01 0 1000: lswx */
	{ 4, LD+M+HARD },	/* 01 0 1001: lswi */
	{ 4, ST+M+HARD+SX },	/* 01 0 1010: stswx */
	{ 4, ST+M+HARD },	/* 01 0 1011: stswi */
	INVALID,		/* 01 0 1100 */
	{ 8, LD+U },		/* 01 0 1101: ldu */
	INVALID,		/* 01 0 1110 */
	{ 8, ST+U },		/* 01 0 1111: stdu */
	{ 8, LD+U },		/* 01 1 0000: ldux */
	INVALID,		/* 01 1 0001 */
	{ 8, ST+U },		/* 01 1 0010: stdux */
	INVALID,		/* 01 1 0011 */
	INVALID,		/* 01 1 0100 */
	{ 4, LD+SE+U },		/* 01 1 0101: lwaux */
	INVALID,		/* 01 1 0110 */
	INVALID,		/* 01 1 0111 */
	INVALID,		/* 01 1 1000 */
	INVALID,		/* 01 1 1001 */
	INVALID,		/* 01 1 1010 */
	INVALID,		/* 01 1 1011 */
	INVALID,		/* 01 1 1100 */
	INVALID,		/* 01 1 1101 */
	INVALID,		/* 01 1 1110 */
	INVALID,		/* 01 1 1111 */
	INVALID,		/* 10 0 0000 */
	INVALID,		/* 10 0 0001 */
	INVALID,		/* 10 0 0010: stwcx. */
	INVALID,		/* 10 0 0011 */
	INVALID,		/* 10 0 0100 */
	INVALID,		/* 10 0 0101 */
	INVALID,		/* 10 0 0110 */
	INVALID,		/* 10 0 0111 */
	{ 4, LD+SW },		/* 10 0 1000: lwbrx */
	INVALID,		/* 10 0 1001 */
	{ 4, ST+SW },		/* 10 0 1010: stwbrx */
	INVALID,		/* 10 0 1011 */
	{ 2, LD+SW },		/* 10 0 1100: lhbrx */
	{ 4, LD+SE },		/* 10 0 1101: lwa */
	{ 2, ST+SW },		/* 10 0 1110: sthbrx */
	{ 16, ST },		/* 10 0 1111: stq */
	INVALID,		/* 10 1 0000 */
	INVALID,		/* 10 1 0001 */
	INVALID,		/* 10 1 0010 */
	INVALID,		/* 10 1 0011 */
	INVALID,		/* 10 1 0100 */
	INVALID,		/* 10 1 0101 */
	INVALID,		/* 10 1 0110 */
	INVALID,		/* 10 1 0111 */
	INVALID,		/* 10 1 1000 */
	INVALID,		/* 10 1 1001 */
	INVALID,		/* 10 1 1010 */
	INVALID,		/* 10 1 1011 */
	INVALID,		/* 10 1 1100 */
	INVALID,		/* 10 1 1101 */
	INVALID,		/* 10 1 1110 */
	{ 0, ST+HARD },		/* 10 1 1111: dcbz */
	{ 4, LD },		/* 11 0 0000: lwzx */
	INVALID,		/* 11 0 0001 */
	{ 4, ST },		/* 11 0 0010: stwx */
	INVALID,		/* 11 0 0011 */
	{ 2, LD },		/* 11 0 0100: lhzx */
	{ 2, LD+SE },		/* 11 0 0101: lhax */
	{ 2, ST },		/* 11 0 0110: sthx */
	INVALID,		/* 11 0 0111 */
	{ 4, LD+F+S },		/* 11 0 1000: lfsx */
	{ 8, LD+F },		/* 11 0 1001: lfdx */
	{ 4, ST+F+S },		/* 11 0 1010: stfsx */
	{ 8, ST+F },		/* 11 0 1011: stfdx */
	{ 16, LD+F },		/* 11 0 1100: lfdpx */
	{ 4, LD+F+SE },		/* 11 0 1101: lfiwax */
	{ 16, ST+F },		/* 11 0 1110: stfdpx */
	{ 4, ST+F },		/* 11 0 1111: stfiwx */
	{ 4, LD+U },		/* 11 1 0000: lwzux */
	INVALID,		/* 11 1 0001 */
	{ 4, ST+U },		/* 11 1 0010: stwux */
	INVALID,		/* 11 1 0011 */
	{ 2, LD+U },		/* 11 1 0100: lhzux */
	{ 2, LD+SE+U },		/* 11 1 0101: lhaux */
	{ 2, ST+U },		/* 11 1 0110: sthux */
	INVALID,		/* 11 1 0111 */
	{ 4, LD+F+S+U },	/* 11 1 1000: lfsux */
	{ 8, LD+F+U },		/* 11 1 1001: lfdux */
	{ 4, ST+F+S+U },	/* 11 1 1010: stfsux */
	{ 8, ST+F+U },		/* 11 1 1011: stfdux */
	INVALID,		/* 11 1 1100 */
	{ 4, LD+F },		/* 11 1 1101: lfiwzx */
	INVALID,		/* 11 1 1110 */
	INVALID,		/* 11 1 1111 */
};
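
/*
 * The 7-bit index into this table is built in fix_alignment() from the
 * DSISR image ((dsisr >> 10) & 0x7f, plus two more bits folded in for
 * DS-form instructions); the "xx y zzzz" comments above show its bit
 * fields: form/low-opcode bits, the update bit, and the low opcode bits.
 */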
/*
 * Create a DSISR value from the instruction
 */
static inline unsigned make_dsisr(unsigned instr)
{
	unsigned dsisr;

	/* bits 6:15 --> 22:31 */
	dsisr = (instr & 0x03ff0000) >> 16;

	if (IS_XFORM(instr)) {
		/* bits 29:30 --> 15:16 */
		dsisr |= (instr & 0x00000006) << 14;
		/* bit 25 --> 17 */
		dsisr |= (instr & 0x00000040) << 8;
		/* bits 21:24 --> 18:21 */
		dsisr |= (instr & 0x00000780) << 3;
	} else {
		/* bit 5 --> 17 */
		dsisr |= (instr & 0x04000000) >> 12;
		/* bits 1:4 --> 18:21 */
		dsisr |= (instr & 0x78000000) >> 17;
		/* bits 30:31 --> 12:13 */
		if (IS_DSFORM(instr))
			dsisr |= (instr & 0x00000003) << 18;
	}

	return dsisr;
}
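
/*
 * Note: the bit ranges in the comments above use the Power ISA's
 * big-endian (MSB = bit 0) numbering for both the instruction image and
 * the DSISR; e.g. instruction "bits 6:15" are (instr >> 16) & 0x3ff.
 */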
/*
 * The dcbz (data cache block zero) instruction
 * gives an alignment fault if used on non-cacheable
 * memory.  We handle the fault mainly for the
 * case when we are running with the cache disabled
 * for debugging.
 */
static int emulate_dcbz(struct pt_regs *regs, unsigned char __user *addr)
{
	long __user *p;
	int i, size;

#ifdef __powerpc64__
	size = ppc64_caches.dline_size;
#else
	size = L1_CACHE_BYTES;
#endif
	p = (long __user *) (regs->dar & -size);
	if (user_mode(regs) && !access_ok(VERIFY_WRITE, p, size))
		return -EFAULT;
	for (i = 0; i < size / sizeof(long); ++i)
		if (__put_user_inatomic(0, p+i))
			return -EFAULT;
	return 1;
}
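
/*
 * For example, with a 32-byte cache line a dcbz that faults at address
 * 0x1005 zeroes the whole aligned block 0x1000..0x101f, since the
 * target is rounded down with (regs->dar & -size).
 */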
/*
 * Emulate load & store multiple instructions
 * On 64-bit machines, these instructions only affect/use the
 * bottom 4 bytes of each register, and the loads clear the
 * top 4 bytes of the affected register.
 */
#ifdef __BIG_ENDIAN__
#ifdef CONFIG_PPC64
#define REG_BYTE(rp, i)		*((u8 *)((rp) + ((i) >> 2)) + ((i) & 3) + 4)
#else
#define REG_BYTE(rp, i)		*((u8 *)(rp) + (i))
#endif
#endif

#ifdef __LITTLE_ENDIAN__
#define REG_BYTE(rp, i) (*(((u8 *)((rp) + ((i)>>2)) + ((i)&3))))
#endif

#define SWIZ_PTR(p)		((unsigned char __user *)((p) ^ swiz))
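
/*
 * REG_BYTE(rp, i) names byte i of the stream of 32-bit register images
 * starting at rp; on 64-bit big-endian kernels the "+ 4" skips the high
 * half of each 8-byte GPR so that only the low 32 bits are touched.
 * SWIZ_PTR() applies the PPC little-endian address swizzle (XOR with 7)
 * when 'swiz' is set; with swiz == 0 it is a plain cast.
 */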
static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
			    unsigned int reg, unsigned int nb,
			    unsigned int flags, unsigned int instr,
			    unsigned long swiz)
{
	unsigned long *rptr;
	unsigned int nb0, i, bswiz;
	unsigned long p;

	/*
	 * We do not try to emulate 8-byte multiples as they aren't really
	 * available in our operating environments and we don't try to
	 * emulate multiple operations in kernel land as they should never
	 * be used/generated there, at least not on unaligned boundaries.
	 */
	if (unlikely((nb > 4) || !user_mode(regs)))
		return 0;

	/* lmw, stmw, lswi/x, stswi/x */
	nb0 = 0;
	if (flags & HARD) {
		if (flags & SX) {
			nb = regs->xer & 127;
			if (nb == 0)
				return 1;
		} else {
			unsigned long pc = regs->nip ^ (swiz & 4);

			if (__get_user_inatomic(instr,
						(unsigned int __user *)pc))
				return -EFAULT;
			if (swiz == 0 && (flags & SW))
				instr = cpu_to_le32(instr);
			nb = (instr >> 11) & 0x1f;
			if (nb == 0)
				nb = 32;
		}
		if (nb + reg * 4 > 128) {
			nb0 = nb + reg * 4 - 128;
			nb = 128 - reg * 4;
		}
#ifdef __LITTLE_ENDIAN__
		/*
		 * String instructions are endian neutral but the code
		 * below is not.  Force byte swapping on so that the
		 * effects of swizzling are undone in the load/store
		 * loops below.
		 */
		flags ^= SW;
#endif
	} else {
		/* lmw, stmw */
		nb = (32 - reg) * 4;
	}

	if (!access_ok((flags & ST ? VERIFY_WRITE : VERIFY_READ), addr, nb+nb0))
		return -EFAULT;	/* bad address */

	rptr = &regs->gpr[reg];
	p = (unsigned long) addr;
	bswiz = (flags & SW)? 3: 0;

	if (!(flags & ST)) {
		/*
		 * This zeroes the top 4 bytes of the affected registers
		 * in 64-bit mode, and also zeroes out any remaining
		 * bytes of the last register for lsw*.
		 */
		memset(rptr, 0, ((nb + 3) / 4) * sizeof(unsigned long));
		if (nb0 > 0)
			memset(&regs->gpr[0], 0,
			       ((nb0 + 3) / 4) * sizeof(unsigned long));

		for (i = 0; i < nb; ++i, ++p)
			if (__get_user_inatomic(REG_BYTE(rptr, i ^ bswiz),
						SWIZ_PTR(p)))
				return -EFAULT;
		if (nb0 > 0) {
			rptr = &regs->gpr[0];
			addr += nb;
			for (i = 0; i < nb0; ++i, ++p)
				if (__get_user_inatomic(REG_BYTE(rptr,
								 i ^ bswiz),
							SWIZ_PTR(p)))
					return -EFAULT;
		}

	} else {
		for (i = 0; i < nb; ++i, ++p)
			if (__put_user_inatomic(REG_BYTE(rptr, i ^ bswiz),
						SWIZ_PTR(p)))
				return -EFAULT;
		if (nb0 > 0) {
			rptr = &regs->gpr[0];
			addr += nb;
			for (i = 0; i < nb0; ++i, ++p)
				if (__put_user_inatomic(REG_BYTE(rptr,
								 i ^ bswiz),
							SWIZ_PTR(p)))
					return -EFAULT;
		}
	}
	return 1;
}
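
/*
 * A string transfer that would run past r31 wraps around to r0: the nb0
 * bytes beyond the 128-byte register-file image (32 regs * 4 bytes) are
 * moved through gpr[0] onwards in the second loop above.
 */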
/*
 * Emulate floating-point pair loads and stores.
 * Only POWER6 has these instructions, and it does true little-endian,
 * so we don't need the address swizzling.
 */
static int emulate_fp_pair(unsigned char __user *addr, unsigned int reg,
			   unsigned int flags)
{
	char *ptr0 = (char *) &current->thread.TS_FPR(reg);
	char *ptr1 = (char *) &current->thread.TS_FPR(reg+1);
	int i, ret, sw = 0;

	if (reg & 1)
		return 0;	/* invalid form: FRS/FRT must be even */
	if (flags & SW)
		sw = 7;
	ret = 0;
	for (i = 0; i < 8; ++i) {
		if (!(flags & ST)) {
			ret |= __get_user(ptr0[i^sw], addr + i);
			ret |= __get_user(ptr1[i^sw], addr + i + 8);
		} else {
			ret |= __put_user(ptr0[i^sw], addr + i);
			ret |= __put_user(ptr1[i^sw], addr + i + 8);
		}
	}
	if (ret)
		return -EFAULT;
	return 1;	/* exception handled and fixed up */
}
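
/*
 * With SW set, indexing the register bytes as ptr[i ^ 7] while memory
 * is walked linearly byte-reverses each 8-byte half of the pair, which
 * is all the swapping a byte-swapped 16-byte FP pair access needs.
 */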
#ifdef CONFIG_PPC64
static int emulate_lq_stq(struct pt_regs *regs, unsigned char __user *addr,
			  unsigned int reg, unsigned int flags)
{
	char *ptr0 = (char *)&regs->gpr[reg];
	char *ptr1 = (char *)&regs->gpr[reg+1];
	int i, ret, sw = 0;

	if (reg & 1)
		return 0;	/* invalid form: GPR must be even */
	if (flags & SW)
		sw = 7;
	ret = 0;
	for (i = 0; i < 8; ++i) {
		if (!(flags & ST)) {
			ret |= __get_user(ptr0[i^sw], addr + i);
			ret |= __get_user(ptr1[i^sw], addr + i + 8);
		} else {
			ret |= __put_user(ptr0[i^sw], addr + i);
			ret |= __put_user(ptr1[i^sw], addr + i + 8);
		}
	}
	if (ret)
		return -EFAULT;
	return 1;	/* exception handled and fixed up */
}
#endif /* CONFIG_PPC64 */
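
/*
 * emulate_lq_stq() mirrors emulate_fp_pair() above but moves the 16
 * bytes through an even/odd GPR pair instead of an FP register pair.
 */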
#ifdef CONFIG_SPE

static struct aligninfo spe_aligninfo[32] = {
	{ 8, LD+E8 },		/* 0 00 00: evldd[x] */
	{ 8, LD+E4 },		/* 0 00 01: evldw[x] */
	{ 8, LD },		/* 0 00 10: evldh[x] */
	INVALID,		/* 0 00 11 */
	{ 2, LD },		/* 0 01 00: evlhhesplat[x] */
	INVALID,		/* 0 01 01 */
	{ 2, LD },		/* 0 01 10: evlhhousplat[x] */
	{ 2, LD+SE },		/* 0 01 11: evlhhossplat[x] */
	{ 4, LD },		/* 0 10 00: evlwhe[x] */
	INVALID,		/* 0 10 01 */
	{ 4, LD },		/* 0 10 10: evlwhou[x] */
	{ 4, LD+SE },		/* 0 10 11: evlwhos[x] */
	{ 4, LD+E4 },		/* 0 11 00: evlwwsplat[x] */
	INVALID,		/* 0 11 01 */
	{ 4, LD },		/* 0 11 10: evlwhsplat[x] */
	INVALID,		/* 0 11 11 */
	{ 8, ST+E8 },		/* 1 00 00: evstdd[x] */
	{ 8, ST+E4 },		/* 1 00 01: evstdw[x] */
	{ 8, ST },		/* 1 00 10: evstdh[x] */
	INVALID,		/* 1 00 11 */
	INVALID,		/* 1 01 00 */
	INVALID,		/* 1 01 01 */
	INVALID,		/* 1 01 10 */
	INVALID,		/* 1 01 11 */
	{ 4, ST },		/* 1 10 00: evstwhe[x] */
	INVALID,		/* 1 10 01 */
	{ 4, ST },		/* 1 10 10: evstwho[x] */
	INVALID,		/* 1 10 11 */
	{ 4, ST+E4 },		/* 1 11 00: evstwwe[x] */
	INVALID,		/* 1 11 01 */
	{ 4, ST+E4 },		/* 1 11 10: evstwwo[x] */
	INVALID,		/* 1 11 11 */
};
#define	EVLDD		0x00
#define	EVLDW		0x01
#define	EVLDH		0x02
#define	EVLHHESPLAT	0x04
#define	EVLHHOUSPLAT	0x06
#define	EVLHHOSSPLAT	0x07
#define	EVLWHE		0x08
#define	EVLWHOU		0x0A
#define	EVLWHOS		0x0B
#define	EVLWWSPLAT	0x0C
#define	EVLWHSPLAT	0x0E
#define	EVSTDD		0x10
#define	EVSTDW		0x11
#define	EVSTDH		0x12
#define	EVSTWHE		0x18
#define	EVSTWHO		0x1A
#define	EVSTWWE		0x1C
#define	EVSTWWO		0x1E
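
/*
 * These values double as indexes into spe_aligninfo[]; they are the
 * 5 bits that emulate_spe() extracts from the instruction with
 * (instr >> 1) & 0x1f.
 */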
/*
 * Emulate SPE loads and stores.
 * Only Book-E has these instructions, and it does true little-endian,
 * so we don't need the address swizzling.
 */
static int emulate_spe(struct pt_regs *regs, unsigned int reg,
		       unsigned int instr)
{
	int ret;
	union {
		u64 ll;
		u32 w[2];
		u16 h[4];
		u8 v[8];
	} data, temp;
	unsigned char __user *p, *addr;
	unsigned long *evr = &current->thread.evr[reg];
	unsigned int nb, flags;

	instr = (instr >> 1) & 0x1f;

	/* DAR has the operand effective address */
	addr = (unsigned char __user *)regs->dar;

	nb = spe_aligninfo[instr].len;
	flags = spe_aligninfo[instr].flags;

	/* Verify the address of the operand */
	if (unlikely(user_mode(regs) &&
		     !access_ok((flags & ST ? VERIFY_WRITE : VERIFY_READ),
				addr, nb)))
		return -EFAULT;

	/* userland only */
	if (unlikely(!user_mode(regs)))
		return 0;

	flush_spe_to_thread(current);

	/* If we are loading, get the data from user space, else
	 * get it from register values
	 */
	if (flags & ST) {
		data.ll = 0;
		switch (instr) {
		case EVSTDD:
		case EVSTDW:
		case EVSTDH:
			data.w[0] = *evr;
			data.w[1] = regs->gpr[reg];
			break;
		case EVSTWHE:
			data.h[2] = *evr >> 16;
			data.h[3] = regs->gpr[reg] >> 16;
			break;
		case EVSTWHO:
			data.h[2] = *evr & 0xffff;
			data.h[3] = regs->gpr[reg] & 0xffff;
			break;
		case EVSTWWE:
			data.w[1] = *evr;
			break;
		case EVSTWWO:
			data.w[1] = regs->gpr[reg];
			break;
		default:
			return -EINVAL;
		}
	} else {
		temp.ll = data.ll = 0;
		ret = 0;
		p = addr;

		switch (nb) {
		case 8:
			ret |= __get_user_inatomic(temp.v[0], p++);
			ret |= __get_user_inatomic(temp.v[1], p++);
			ret |= __get_user_inatomic(temp.v[2], p++);
			ret |= __get_user_inatomic(temp.v[3], p++);
			/* fall through: narrower accesses fill the tail */
		case 4:
			ret |= __get_user_inatomic(temp.v[4], p++);
			ret |= __get_user_inatomic(temp.v[5], p++);
			/* fall through */
		case 2:
			ret |= __get_user_inatomic(temp.v[6], p++);
			ret |= __get_user_inatomic(temp.v[7], p++);
			if (unlikely(ret))
				return -EFAULT;
		}

		switch (instr) {
		case EVLDD:
		case EVLDW:
		case EVLDH:
			data.ll = temp.ll;
			break;
		case EVLHHESPLAT:
			data.h[0] = temp.h[3];
			data.h[2] = temp.h[3];
			break;
		case EVLHHOUSPLAT:
		case EVLHHOSSPLAT:
			data.h[1] = temp.h[3];
			data.h[3] = temp.h[3];
			break;
		case EVLWHE:
			data.h[0] = temp.h[2];
			data.h[2] = temp.h[3];
			break;
		case EVLWHOU:
		case EVLWHOS:
			data.h[1] = temp.h[2];
			data.h[3] = temp.h[3];
			break;
		case EVLWWSPLAT:
			data.w[0] = temp.w[1];
			data.w[1] = temp.w[1];
			break;
		case EVLWHSPLAT:
			data.h[0] = temp.h[2];
			data.h[1] = temp.h[2];
			data.h[2] = temp.h[3];
			data.h[3] = temp.h[3];
			break;
		default:
			return -EINVAL;
		}
	}

	if (flags & SW) {
		switch (flags & 0xf0) {
		case E8:
			data.ll = swab64(data.ll);
			break;
		case E4:
			data.w[0] = swab32(data.w[0]);
			data.w[1] = swab32(data.w[1]);
			break;
		/* It's half-word endian */
		default:
			data.h[0] = swab16(data.h[0]);
			data.h[1] = swab16(data.h[1]);
			data.h[2] = swab16(data.h[2]);
			data.h[3] = swab16(data.h[3]);
			break;
		}
	}

	if (flags & SE) {
		data.w[0] = (s16)data.h[1];
		data.w[1] = (s16)data.h[3];
	}

	/* Store result to memory or update registers */
	if (flags & ST) {
		ret = 0;
		p = addr;
		switch (nb) {
		case 8:
			ret |= __put_user_inatomic(data.v[0], p++);
			ret |= __put_user_inatomic(data.v[1], p++);
			ret |= __put_user_inatomic(data.v[2], p++);
			ret |= __put_user_inatomic(data.v[3], p++);
			/* fall through */
		case 4:
			ret |= __put_user_inatomic(data.v[4], p++);
			ret |= __put_user_inatomic(data.v[5], p++);
			/* fall through */
		case 2:
			ret |= __put_user_inatomic(data.v[6], p++);
			ret |= __put_user_inatomic(data.v[7], p++);
		}
		if (unlikely(ret))
			return -EFAULT;
	} else {
		*evr = data.w[0];
		regs->gpr[reg] = data.w[1];
	}

	return 1;
}
#endif /* CONFIG_SPE */
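
/*
 * Example: evlwwsplat loads one word and replicates it, so above the
 * single word read into temp.w[1] is copied to both data.w[0] (the
 * high half, written back to the EVR) and data.w[1] (the GPR).
 */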
#ifdef CONFIG_VSX
/*
 * Emulate VSX instructions...
 */
static int emulate_vsx(unsigned char __user *addr, unsigned int reg,
		       unsigned int areg, struct pt_regs *regs,
		       unsigned int flags, unsigned int length,
		       unsigned int elsize)
{
	char *ptr;
	unsigned long *lptr;
	int ret = 0;
	int sw = 0;
	int i, j;

	/* userland only */
	if (unlikely(!user_mode(regs)))
		return 0;

	flush_vsx_to_thread(current);

	if (reg < 32)
		ptr = (char *) &current->thread.fp_state.fpr[reg][0];
	else
		ptr = (char *) &current->thread.vr_state.vr[reg - 32];

	lptr = (unsigned long *) ptr;

#ifdef __LITTLE_ENDIAN__
	if (flags & SW) {
		elsize = length;
		sw = length-1;
	} else {
		/*
		 * The elements are BE ordered, even in LE mode, so process
		 * them in reverse order.
		 */
		addr += length - elsize;

		/* 8 byte memory accesses go in the top 8 bytes of the VR */
		if (length == 8)
			ptr += 8;
	}
#else
	if (flags & SW)
		sw = elsize-1;
#endif

	for (j = 0; j < length; j += elsize) {
		for (i = 0; i < elsize; ++i) {
			if (flags & ST)
				ret |= __put_user(ptr[i^sw], addr + i);
			else
				ret |= __get_user(ptr[i^sw], addr + i);
		}
		ptr += elsize;
#ifdef __LITTLE_ENDIAN__
		addr -= elsize;
#else
		addr += elsize;
#endif
	}

#ifdef __BIG_ENDIAN__
#define VSX_HI 0
#define VSX_LO 1
#else
#define VSX_HI 1
#define VSX_LO 0
#endif

	if (!ret) {
		if (flags & U)
			regs->gpr[areg] = regs->dar;

		/* Splat load copies the same data to top and bottom 8 bytes */
		if (flags & SPLT)
			lptr[VSX_LO] = lptr[VSX_HI];
		/* For 8 byte loads, zero the low 8 bytes */
		else if (!(flags & ST) && (8 == length))
			lptr[VSX_LO] = 0;
	} else
		return -EFAULT;

	return 1;
}
#endif
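
/*
 * VSX registers 0-31 overlay the FP registers and 32-63 overlay the
 * VMX/Altivec registers, which is why emulate_vsx() picks its buffer
 * from either thread.fp_state or thread.vr_state based on reg.
 */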
/*
 * Called on alignment exception.  Attempts to fix up the operation.
 *
 * Return 1 on success
 * Return 0 if unable to handle the interrupt
 * Return -EFAULT if data address is bad
 */
int fix_alignment(struct pt_regs *regs)
{
	unsigned int instr, nb, flags, instruction = 0;
	unsigned int reg, areg;
	unsigned int dsisr;
	unsigned char __user *addr;
	unsigned long p, swiz;
	int ret, i;
	union data {
		u64 ll;
		double dd;
		unsigned char v[8];
		struct {
#ifdef __LITTLE_ENDIAN__
			int	 low32;
			unsigned hi32;
#else
			unsigned hi32;
			int	 low32;
#endif
		} x32;
		struct {
#ifdef __LITTLE_ENDIAN__
			short	      low16;
			unsigned char hi48[6];
#else
			unsigned char hi48[6];
			short	      low16;
#endif
		} x16;
	} data;

	/*
	 * We require a complete register set; if not, then our assembly
	 * is broken
	 */
	CHECK_FULL_REGS(regs);

	dsisr = regs->dsisr;

	/* Some processors don't provide us with a DSISR we can use here,
	 * let's make one up from the instruction
	 */
	if (cpu_has_feature(CPU_FTR_NODSISRALIGN)) {
		unsigned long pc = regs->nip;

		if (cpu_has_feature(CPU_FTR_PPC_LE) && (regs->msr & MSR_LE))
			pc ^= 4;
		if (unlikely(__get_user_inatomic(instr,
						 (unsigned int __user *)pc)))
			return -EFAULT;
		if (cpu_has_feature(CPU_FTR_REAL_LE) && (regs->msr & MSR_LE))
			instr = cpu_to_le32(instr);
		dsisr = make_dsisr(instr);
		instruction = instr;
	}

	/* extract the operation and registers from the dsisr */
	reg = (dsisr >> 5) & 0x1f;	/* source/dest register */
	areg = dsisr & 0x1f;		/* register to update */

#ifdef CONFIG_SPE
	if ((instr >> 26) == 0x4) {
		PPC_WARN_ALIGNMENT(spe, regs);
		return emulate_spe(regs, reg, instr);
	}
#endif

	instr = (dsisr >> 10) & 0x7f;
	instr |= (dsisr >> 13) & 0x60;

	/* Lookup the operation in our table */
	nb = aligninfo[instr].len;
	flags = aligninfo[instr].flags;

	/* ldbrx/stdbrx overlap lfs/stfs in the DSISR unfortunately */
	if (IS_XFORM(instruction) && ((instruction >> 1) & 0x3ff) == 532) {
		nb = 8;
		flags = LD+SW;
	} else if (IS_XFORM(instruction) &&
		   ((instruction >> 1) & 0x3ff) == 660) {
		nb = 8;
		flags = ST+SW;
	}

	/* Byteswap little endian loads and stores */
	swiz = 0;
	if ((regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE)) {
		flags ^= SW;
#ifdef __BIG_ENDIAN__
		/*
		 * So-called "PowerPC little endian" mode works by
		 * swizzling addresses rather than by actually doing
		 * any byte-swapping.  To emulate this, we XOR each
		 * byte address with 7.  We also byte-swap, because
		 * the processor's address swizzling depends on the
		 * operand size (it xors the address with 7 for bytes,
		 * 6 for halfwords, 4 for words, 0 for doublewords) but
		 * we will xor with 7 and load/store each byte separately.
		 */
		if (cpu_has_feature(CPU_FTR_PPC_LE))
			swiz = 7;
#endif
	}

	/* DAR has the operand effective address */
	addr = (unsigned char __user *)regs->dar;

#ifdef CONFIG_VSX
	if ((instruction & 0xfc00003e) == 0x7c000018) {
		unsigned int elsize;

		/* Additional register addressing bit (64 VSX vs 32 FPR/GPR) */
		reg |= (instruction & 0x1) << 5;

		/* Simple inline decoder instead of a table */
		/* VSX has only 8 and 16 byte memory accesses */
		nb = 8;
		if (instruction & 0x200)
			nb = 16;

		/* Vector stores in little-endian mode swap individual
		   elements, so process them separately */
		elsize = 4;
		if (instruction & 0x80)
			elsize = 8;

		flags = 0;
		if ((regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE))
			flags |= SW;
		if (instruction & 0x100)
			flags |= ST;
		if (instruction & 0x040)
			flags |= U;
		/* splat load needs a special decoder */
		if ((instruction & 0x400) == 0) {
			flags |= SPLT;
			nb = 8;
		}
		PPC_WARN_ALIGNMENT(vsx, regs);
		return emulate_vsx(addr, reg, areg, regs, flags, nb, elsize);
	}
#endif
	/* A size of 0 indicates an instruction we don't support, with
	 * the exception of DCBZ which is handled as a special case here
	 */
	if (instr == DCBZ) {
		PPC_WARN_ALIGNMENT(dcbz, regs);
		return emulate_dcbz(regs, addr);
	}
	if (unlikely(nb == 0))
		return 0;

	/* Load/Store Multiple instructions are handled in their own
	 * function
	 */
	if (flags & M) {
		PPC_WARN_ALIGNMENT(multiple, regs);
		return emulate_multiple(regs, addr, reg, nb,
					flags, instr, swiz);
	}

	/* Verify the address of the operand */
	if (unlikely(user_mode(regs) &&
		     !access_ok((flags & ST ? VERIFY_WRITE : VERIFY_READ),
				addr, nb)))
		return -EFAULT;

	/* Force the fprs into the save area so we can reference them */
	if (flags & F) {
		/* userland only */
		if (unlikely(!user_mode(regs)))
			return 0;
		flush_fp_to_thread(current);
	}

	if (nb == 16) {
		if (flags & F) {
			/* Special case for 16-byte FP loads and stores */
			PPC_WARN_ALIGNMENT(fp_pair, regs);
			return emulate_fp_pair(addr, reg, flags);
		} else {
#ifdef CONFIG_PPC64
			/* Special case for 16-byte loads and stores */
			PPC_WARN_ALIGNMENT(lq_stq, regs);
			return emulate_lq_stq(regs, addr, reg, flags);
#else
			return 0;
#endif
		}
	}

	PPC_WARN_ALIGNMENT(unaligned, regs);

	/* If we are loading, get the data from user space, else
	 * get it from register values
	 */
	if (!(flags & ST)) {
		unsigned int start = 0;

		switch (nb) {
		case 4:
			start = offsetof(union data, x32.low32);
			break;
		case 2:
			start = offsetof(union data, x16.low16);
			break;
		}

		data.ll = 0;
		ret = 0;
		p = (unsigned long)addr;

		for (i = 0; i < nb; i++)
			ret |= __get_user_inatomic(data.v[start + i],
						   SWIZ_PTR(p++));

		if (unlikely(ret))
			return -EFAULT;

	} else if (flags & F) {
		data.ll = current->thread.TS_FPR(reg);
		if (flags & S) {
			/* Single-precision FP store requires conversion... */
#ifdef CONFIG_PPC_FPU
			preempt_disable();
			enable_kernel_fp();
			cvt_df(&data.dd, (float *)&data.x32.low32);
			preempt_enable();
#else
			return 0;
#endif
		}
	} else
		data.ll = regs->gpr[reg];

	if (flags & SW) {
		switch (nb) {
		case 8:
			data.ll = swab64(data.ll);
			break;
		case 4:
			data.x32.low32 = swab32(data.x32.low32);
			break;
		case 2:
			data.x16.low16 = swab16(data.x16.low16);
			break;
		}
	}

	/* Perform other misc operations like sign extension
	 * or floating point single precision conversion
	 */
	switch (flags & ~(U|SW)) {
	case LD+SE:	/* sign extending integer loads */
	case LD+F+SE:	/* sign extend for lfiwax */
		if (nb == 2)
			data.ll = data.x16.low16;
		else	/* nb must be 4 */
			data.ll = data.x32.low32;
		break;

	/* Single-precision FP load requires conversion... */
	case LD+F+S:
#ifdef CONFIG_PPC_FPU
		preempt_disable();
		enable_kernel_fp();
		cvt_fd((float *)&data.x32.low32, &data.dd);
		preempt_enable();
#else
		return 0;
#endif
		break;
	}

	/* Store result to memory or update registers */
	if (flags & ST) {
		unsigned int start = 0;

		switch (nb) {
		case 4:
			start = offsetof(union data, x32.low32);
			break;
		case 2:
			start = offsetof(union data, x16.low16);
			break;
		}

		ret = 0;
		p = (unsigned long)addr;

		for (i = 0; i < nb; i++)
			ret |= __put_user_inatomic(data.v[start + i],
						   SWIZ_PTR(p++));

		if (unlikely(ret))
			return -EFAULT;
	} else if (flags & F)
		current->thread.TS_FPR(reg) = data.ll;
	else
		regs->gpr[reg] = data.ll;

	/* Update RA as needed */
	if (flags & U)
		regs->gpr[areg] = regs->dar;

	return 1;
}
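
/*
 * Callers are expected to act on the return value; in the kernel this
 * is typically the alignment interrupt handler in traps.c, which on
 * success (1) advances regs->nip past the emulated instruction and
 * otherwise raises SIGBUS (0) or SIGSEGV (-EFAULT) on the task.
 */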