unaligned.c 54 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124
  1. /*
  2. * Handle unaligned accesses by emulation.
  3. *
  4. * This file is subject to the terms and conditions of the GNU General Public
  5. * License. See the file "COPYING" in the main directory of this archive
  6. * for more details.
  7. *
  8. * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
  9. * Copyright (C) 1999 Silicon Graphics, Inc.
  10. * Copyright (C) 2014 Imagination Technologies Ltd.
  11. *
  12. * This file contains exception handler for address error exception with the
  13. * special capability to execute faulting instructions in software. The
  14. * handler does not try to handle the case when the program counter points
  15. * to an address not aligned to a word boundary.
  16. *
  17. * Putting data to unaligned addresses is a bad practice even on Intel where
  18. * only the performance is affected. Much worse is that such code is non-
  19. * portable. Due to several programs that die on MIPS due to alignment
  20. * problems I decided to implement this handler anyway though I originally
  21. * didn't intend to do this at all for user code.
  22. *
  23. * For now I enable fixing of address errors by default to make life easier.
  24. * I however intend to disable this somewhen in the future when the alignment
  25. * problems with user programs have been fixed. For programmers this is the
  26. * right way to go.
  27. *
  28. * Fixing address errors is a per process option. The option is inherited
  29. * across fork(2) and execve(2) calls. If you really want to use the
  30. * option in your user programs - I discourage the use of the software
  31. * emulation strongly - use the following code in your userland stuff:
  32. *
  33. * #include <sys/sysmips.h>
  34. *
  35. * ...
  36. * sysmips(MIPS_FIXADE, x);
  37. * ...
  38. *
  39. * The argument x is 0 for disabling software emulation, enabled otherwise.
  40. *
  41. * Below a little program to play around with this feature.
  42. *
  43. * #include <stdio.h>
  44. * #include <sys/sysmips.h>
  45. *
  46. * struct foo {
  47. * unsigned char bar[8];
  48. * };
  49. *
  50. * main(int argc, char *argv[])
  51. * {
  52. * struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
  53. * unsigned int *p = (unsigned int *) (x.bar + 3);
  54. * int i;
  55. *
  56. * if (argc > 1)
  57. * sysmips(MIPS_FIXADE, atoi(argv[1]));
  58. *
  59. * printf("*p = %08lx\n", *p);
  60. *
  61. * *p = 0xdeadface;
  62. *
  63. * for(i = 0; i <= 7; i++)
  64. * printf("%02x ", x.bar[i]);
  65. * printf("\n");
  66. * }
  67. *
  68. * Coprocessor loads are not supported; I think this case is unimportant
  69. * in the practice.
  70. *
  71. * TODO: Handle ndc (attempted store to doubleword in uncached memory)
  72. * exception for the R6000.
  73. * A store crossing a page boundary might be executed only partially.
  74. * Undo the partial store in this case.
  75. */
  76. #include <linux/context_tracking.h>
  77. #include <linux/mm.h>
  78. #include <linux/signal.h>
  79. #include <linux/smp.h>
  80. #include <linux/sched.h>
  81. #include <linux/debugfs.h>
  82. #include <linux/perf_event.h>
  83. #include <asm/asm.h>
  84. #include <asm/branch.h>
  85. #include <asm/byteorder.h>
  86. #include <asm/cop2.h>
  87. #include <asm/fpu.h>
  88. #include <asm/fpu_emulator.h>
  89. #include <asm/inst.h>
  90. #include <asm/uaccess.h>
  91. #include <asm/fpu.h>
  92. #include <asm/fpu_emulator.h>
  93. #define STR(x) __STR(x)
  94. #define __STR(x) #x
  95. enum {
  96. UNALIGNED_ACTION_QUIET,
  97. UNALIGNED_ACTION_SIGNAL,
  98. UNALIGNED_ACTION_SHOW,
  99. };
  100. #ifdef CONFIG_DEBUG_FS
  101. static u32 unaligned_instructions;
  102. static u32 unaligned_action;
  103. #else
  104. #define unaligned_action UNALIGNED_ACTION_QUIET
  105. #endif
  106. extern void show_registers(struct pt_regs *regs);
  107. #ifdef __BIG_ENDIAN
  108. #define LoadHW(addr, value, res) \
  109. __asm__ __volatile__ (".set\tnoat\n" \
  110. "1:\t"user_lb("%0", "0(%2)")"\n" \
  111. "2:\t"user_lbu("$1", "1(%2)")"\n\t" \
  112. "sll\t%0, 0x8\n\t" \
  113. "or\t%0, $1\n\t" \
  114. "li\t%1, 0\n" \
  115. "3:\t.set\tat\n\t" \
  116. ".insn\n\t" \
  117. ".section\t.fixup,\"ax\"\n\t" \
  118. "4:\tli\t%1, %3\n\t" \
  119. "j\t3b\n\t" \
  120. ".previous\n\t" \
  121. ".section\t__ex_table,\"a\"\n\t" \
  122. STR(PTR)"\t1b, 4b\n\t" \
  123. STR(PTR)"\t2b, 4b\n\t" \
  124. ".previous" \
  125. : "=&r" (value), "=r" (res) \
  126. : "r" (addr), "i" (-EFAULT));
  127. #ifndef CONFIG_CPU_MIPSR6
  128. #define LoadW(addr, value, res) \
  129. __asm__ __volatile__ ( \
  130. "1:\t"user_lwl("%0", "(%2)")"\n" \
  131. "2:\t"user_lwr("%0", "3(%2)")"\n\t" \
  132. "li\t%1, 0\n" \
  133. "3:\n\t" \
  134. ".insn\n\t" \
  135. ".section\t.fixup,\"ax\"\n\t" \
  136. "4:\tli\t%1, %3\n\t" \
  137. "j\t3b\n\t" \
  138. ".previous\n\t" \
  139. ".section\t__ex_table,\"a\"\n\t" \
  140. STR(PTR)"\t1b, 4b\n\t" \
  141. STR(PTR)"\t2b, 4b\n\t" \
  142. ".previous" \
  143. : "=&r" (value), "=r" (res) \
  144. : "r" (addr), "i" (-EFAULT));
  145. #else
  146. /* MIPSR6 has no lwl instruction */
  147. #define LoadW(addr, value, res) \
  148. __asm__ __volatile__ ( \
  149. ".set\tpush\n" \
  150. ".set\tnoat\n\t" \
  151. "1:"user_lb("%0", "0(%2)")"\n\t" \
  152. "2:"user_lbu("$1", "1(%2)")"\n\t" \
  153. "sll\t%0, 0x8\n\t" \
  154. "or\t%0, $1\n\t" \
  155. "3:"user_lbu("$1", "2(%2)")"\n\t" \
  156. "sll\t%0, 0x8\n\t" \
  157. "or\t%0, $1\n\t" \
  158. "4:"user_lbu("$1", "3(%2)")"\n\t" \
  159. "sll\t%0, 0x8\n\t" \
  160. "or\t%0, $1\n\t" \
  161. "li\t%1, 0\n" \
  162. ".set\tpop\n" \
  163. "10:\n\t" \
  164. ".insn\n\t" \
  165. ".section\t.fixup,\"ax\"\n\t" \
  166. "11:\tli\t%1, %3\n\t" \
  167. "j\t10b\n\t" \
  168. ".previous\n\t" \
  169. ".section\t__ex_table,\"a\"\n\t" \
  170. STR(PTR)"\t1b, 11b\n\t" \
  171. STR(PTR)"\t2b, 11b\n\t" \
  172. STR(PTR)"\t3b, 11b\n\t" \
  173. STR(PTR)"\t4b, 11b\n\t" \
  174. ".previous" \
  175. : "=&r" (value), "=r" (res) \
  176. : "r" (addr), "i" (-EFAULT));
  177. #endif /* CONFIG_CPU_MIPSR6 */
  178. #define LoadHWU(addr, value, res) \
  179. __asm__ __volatile__ ( \
  180. ".set\tnoat\n" \
  181. "1:\t"user_lbu("%0", "0(%2)")"\n" \
  182. "2:\t"user_lbu("$1", "1(%2)")"\n\t" \
  183. "sll\t%0, 0x8\n\t" \
  184. "or\t%0, $1\n\t" \
  185. "li\t%1, 0\n" \
  186. "3:\n\t" \
  187. ".insn\n\t" \
  188. ".set\tat\n\t" \
  189. ".section\t.fixup,\"ax\"\n\t" \
  190. "4:\tli\t%1, %3\n\t" \
  191. "j\t3b\n\t" \
  192. ".previous\n\t" \
  193. ".section\t__ex_table,\"a\"\n\t" \
  194. STR(PTR)"\t1b, 4b\n\t" \
  195. STR(PTR)"\t2b, 4b\n\t" \
  196. ".previous" \
  197. : "=&r" (value), "=r" (res) \
  198. : "r" (addr), "i" (-EFAULT));
  199. #ifndef CONFIG_CPU_MIPSR6
  200. #define LoadWU(addr, value, res) \
  201. __asm__ __volatile__ ( \
  202. "1:\t"user_lwl("%0", "(%2)")"\n" \
  203. "2:\t"user_lwr("%0", "3(%2)")"\n\t" \
  204. "dsll\t%0, %0, 32\n\t" \
  205. "dsrl\t%0, %0, 32\n\t" \
  206. "li\t%1, 0\n" \
  207. "3:\n\t" \
  208. ".insn\n\t" \
  209. "\t.section\t.fixup,\"ax\"\n\t" \
  210. "4:\tli\t%1, %3\n\t" \
  211. "j\t3b\n\t" \
  212. ".previous\n\t" \
  213. ".section\t__ex_table,\"a\"\n\t" \
  214. STR(PTR)"\t1b, 4b\n\t" \
  215. STR(PTR)"\t2b, 4b\n\t" \
  216. ".previous" \
  217. : "=&r" (value), "=r" (res) \
  218. : "r" (addr), "i" (-EFAULT));
  219. #define LoadDW(addr, value, res) \
  220. __asm__ __volatile__ ( \
  221. "1:\tldl\t%0, (%2)\n" \
  222. "2:\tldr\t%0, 7(%2)\n\t" \
  223. "li\t%1, 0\n" \
  224. "3:\n\t" \
  225. ".insn\n\t" \
  226. "\t.section\t.fixup,\"ax\"\n\t" \
  227. "4:\tli\t%1, %3\n\t" \
  228. "j\t3b\n\t" \
  229. ".previous\n\t" \
  230. ".section\t__ex_table,\"a\"\n\t" \
  231. STR(PTR)"\t1b, 4b\n\t" \
  232. STR(PTR)"\t2b, 4b\n\t" \
  233. ".previous" \
  234. : "=&r" (value), "=r" (res) \
  235. : "r" (addr), "i" (-EFAULT));
  236. #else
  237. /* MIPSR6 has not lwl and ldl instructions */
  238. #define LoadWU(addr, value, res) \
  239. __asm__ __volatile__ ( \
  240. ".set\tpush\n\t" \
  241. ".set\tnoat\n\t" \
  242. "1:"user_lbu("%0", "0(%2)")"\n\t" \
  243. "2:"user_lbu("$1", "1(%2)")"\n\t" \
  244. "sll\t%0, 0x8\n\t" \
  245. "or\t%0, $1\n\t" \
  246. "3:"user_lbu("$1", "2(%2)")"\n\t" \
  247. "sll\t%0, 0x8\n\t" \
  248. "or\t%0, $1\n\t" \
  249. "4:"user_lbu("$1", "3(%2)")"\n\t" \
  250. "sll\t%0, 0x8\n\t" \
  251. "or\t%0, $1\n\t" \
  252. "li\t%1, 0\n" \
  253. ".set\tpop\n" \
  254. "10:\n\t" \
  255. ".insn\n\t" \
  256. ".section\t.fixup,\"ax\"\n\t" \
  257. "11:\tli\t%1, %3\n\t" \
  258. "j\t10b\n\t" \
  259. ".previous\n\t" \
  260. ".section\t__ex_table,\"a\"\n\t" \
  261. STR(PTR)"\t1b, 11b\n\t" \
  262. STR(PTR)"\t2b, 11b\n\t" \
  263. STR(PTR)"\t3b, 11b\n\t" \
  264. STR(PTR)"\t4b, 11b\n\t" \
  265. ".previous" \
  266. : "=&r" (value), "=r" (res) \
  267. : "r" (addr), "i" (-EFAULT));
  268. #define LoadDW(addr, value, res) \
  269. __asm__ __volatile__ ( \
  270. ".set\tpush\n\t" \
  271. ".set\tnoat\n\t" \
  272. "1:lb\t%0, 0(%2)\n\t" \
  273. "2:lbu\t $1, 1(%2)\n\t" \
  274. "dsll\t%0, 0x8\n\t" \
  275. "or\t%0, $1\n\t" \
  276. "3:lbu\t$1, 2(%2)\n\t" \
  277. "dsll\t%0, 0x8\n\t" \
  278. "or\t%0, $1\n\t" \
  279. "4:lbu\t$1, 3(%2)\n\t" \
  280. "dsll\t%0, 0x8\n\t" \
  281. "or\t%0, $1\n\t" \
  282. "5:lbu\t$1, 4(%2)\n\t" \
  283. "dsll\t%0, 0x8\n\t" \
  284. "or\t%0, $1\n\t" \
  285. "6:lbu\t$1, 5(%2)\n\t" \
  286. "dsll\t%0, 0x8\n\t" \
  287. "or\t%0, $1\n\t" \
  288. "7:lbu\t$1, 6(%2)\n\t" \
  289. "dsll\t%0, 0x8\n\t" \
  290. "or\t%0, $1\n\t" \
  291. "8:lbu\t$1, 7(%2)\n\t" \
  292. "dsll\t%0, 0x8\n\t" \
  293. "or\t%0, $1\n\t" \
  294. "li\t%1, 0\n" \
  295. ".set\tpop\n\t" \
  296. "10:\n\t" \
  297. ".insn\n\t" \
  298. ".section\t.fixup,\"ax\"\n\t" \
  299. "11:\tli\t%1, %3\n\t" \
  300. "j\t10b\n\t" \
  301. ".previous\n\t" \
  302. ".section\t__ex_table,\"a\"\n\t" \
  303. STR(PTR)"\t1b, 11b\n\t" \
  304. STR(PTR)"\t2b, 11b\n\t" \
  305. STR(PTR)"\t3b, 11b\n\t" \
  306. STR(PTR)"\t4b, 11b\n\t" \
  307. STR(PTR)"\t5b, 11b\n\t" \
  308. STR(PTR)"\t6b, 11b\n\t" \
  309. STR(PTR)"\t7b, 11b\n\t" \
  310. STR(PTR)"\t8b, 11b\n\t" \
  311. ".previous" \
  312. : "=&r" (value), "=r" (res) \
  313. : "r" (addr), "i" (-EFAULT));
  314. #endif /* CONFIG_CPU_MIPSR6 */
  315. #define StoreHW(addr, value, res) \
  316. __asm__ __volatile__ ( \
  317. ".set\tnoat\n" \
  318. "1:\t"user_sb("%1", "1(%2)")"\n" \
  319. "srl\t$1, %1, 0x8\n" \
  320. "2:\t"user_sb("$1", "0(%2)")"\n" \
  321. ".set\tat\n\t" \
  322. "li\t%0, 0\n" \
  323. "3:\n\t" \
  324. ".insn\n\t" \
  325. ".section\t.fixup,\"ax\"\n\t" \
  326. "4:\tli\t%0, %3\n\t" \
  327. "j\t3b\n\t" \
  328. ".previous\n\t" \
  329. ".section\t__ex_table,\"a\"\n\t" \
  330. STR(PTR)"\t1b, 4b\n\t" \
  331. STR(PTR)"\t2b, 4b\n\t" \
  332. ".previous" \
  333. : "=r" (res) \
  334. : "r" (value), "r" (addr), "i" (-EFAULT));
  335. #ifndef CONFIG_CPU_MIPSR6
  336. #define StoreW(addr, value, res) \
  337. __asm__ __volatile__ ( \
  338. "1:\t"user_swl("%1", "(%2)")"\n" \
  339. "2:\t"user_swr("%1", "3(%2)")"\n\t" \
  340. "li\t%0, 0\n" \
  341. "3:\n\t" \
  342. ".insn\n\t" \
  343. ".section\t.fixup,\"ax\"\n\t" \
  344. "4:\tli\t%0, %3\n\t" \
  345. "j\t3b\n\t" \
  346. ".previous\n\t" \
  347. ".section\t__ex_table,\"a\"\n\t" \
  348. STR(PTR)"\t1b, 4b\n\t" \
  349. STR(PTR)"\t2b, 4b\n\t" \
  350. ".previous" \
  351. : "=r" (res) \
  352. : "r" (value), "r" (addr), "i" (-EFAULT));
  353. #define StoreDW(addr, value, res) \
  354. __asm__ __volatile__ ( \
  355. "1:\tsdl\t%1,(%2)\n" \
  356. "2:\tsdr\t%1, 7(%2)\n\t" \
  357. "li\t%0, 0\n" \
  358. "3:\n\t" \
  359. ".insn\n\t" \
  360. ".section\t.fixup,\"ax\"\n\t" \
  361. "4:\tli\t%0, %3\n\t" \
  362. "j\t3b\n\t" \
  363. ".previous\n\t" \
  364. ".section\t__ex_table,\"a\"\n\t" \
  365. STR(PTR)"\t1b, 4b\n\t" \
  366. STR(PTR)"\t2b, 4b\n\t" \
  367. ".previous" \
  368. : "=r" (res) \
  369. : "r" (value), "r" (addr), "i" (-EFAULT));
  370. #else
  371. /* MIPSR6 has no swl and sdl instructions */
  372. #define StoreW(addr, value, res) \
  373. __asm__ __volatile__ ( \
  374. ".set\tpush\n\t" \
  375. ".set\tnoat\n\t" \
  376. "1:"user_sb("%1", "3(%2)")"\n\t" \
  377. "srl\t$1, %1, 0x8\n\t" \
  378. "2:"user_sb("$1", "2(%2)")"\n\t" \
  379. "srl\t$1, $1, 0x8\n\t" \
  380. "3:"user_sb("$1", "1(%2)")"\n\t" \
  381. "srl\t$1, $1, 0x8\n\t" \
  382. "4:"user_sb("$1", "0(%2)")"\n\t" \
  383. ".set\tpop\n\t" \
  384. "li\t%0, 0\n" \
  385. "10:\n\t" \
  386. ".insn\n\t" \
  387. ".section\t.fixup,\"ax\"\n\t" \
  388. "11:\tli\t%0, %3\n\t" \
  389. "j\t10b\n\t" \
  390. ".previous\n\t" \
  391. ".section\t__ex_table,\"a\"\n\t" \
  392. STR(PTR)"\t1b, 11b\n\t" \
  393. STR(PTR)"\t2b, 11b\n\t" \
  394. STR(PTR)"\t3b, 11b\n\t" \
  395. STR(PTR)"\t4b, 11b\n\t" \
  396. ".previous" \
  397. : "=&r" (res) \
  398. : "r" (value), "r" (addr), "i" (-EFAULT) \
  399. : "memory");
  400. #define StoreDW(addr, value, res) \
  401. __asm__ __volatile__ ( \
  402. ".set\tpush\n\t" \
  403. ".set\tnoat\n\t" \
  404. "1:sb\t%1, 7(%2)\n\t" \
  405. "dsrl\t$1, %1, 0x8\n\t" \
  406. "2:sb\t$1, 6(%2)\n\t" \
  407. "dsrl\t$1, $1, 0x8\n\t" \
  408. "3:sb\t$1, 5(%2)\n\t" \
  409. "dsrl\t$1, $1, 0x8\n\t" \
  410. "4:sb\t$1, 4(%2)\n\t" \
  411. "dsrl\t$1, $1, 0x8\n\t" \
  412. "5:sb\t$1, 3(%2)\n\t" \
  413. "dsrl\t$1, $1, 0x8\n\t" \
  414. "6:sb\t$1, 2(%2)\n\t" \
  415. "dsrl\t$1, $1, 0x8\n\t" \
  416. "7:sb\t$1, 1(%2)\n\t" \
  417. "dsrl\t$1, $1, 0x8\n\t" \
  418. "8:sb\t$1, 0(%2)\n\t" \
  419. "dsrl\t$1, $1, 0x8\n\t" \
  420. ".set\tpop\n\t" \
  421. "li\t%0, 0\n" \
  422. "10:\n\t" \
  423. ".insn\n\t" \
  424. ".section\t.fixup,\"ax\"\n\t" \
  425. "11:\tli\t%0, %3\n\t" \
  426. "j\t10b\n\t" \
  427. ".previous\n\t" \
  428. ".section\t__ex_table,\"a\"\n\t" \
  429. STR(PTR)"\t1b, 11b\n\t" \
  430. STR(PTR)"\t2b, 11b\n\t" \
  431. STR(PTR)"\t3b, 11b\n\t" \
  432. STR(PTR)"\t4b, 11b\n\t" \
  433. STR(PTR)"\t5b, 11b\n\t" \
  434. STR(PTR)"\t6b, 11b\n\t" \
  435. STR(PTR)"\t7b, 11b\n\t" \
  436. STR(PTR)"\t8b, 11b\n\t" \
  437. ".previous" \
  438. : "=&r" (res) \
  439. : "r" (value), "r" (addr), "i" (-EFAULT) \
  440. : "memory");
  441. #endif /* CONFIG_CPU_MIPSR6 */
  442. #else /* __BIG_ENDIAN */
  443. #define LoadHW(addr, value, res) \
  444. __asm__ __volatile__ (".set\tnoat\n" \
  445. "1:\t"user_lb("%0", "1(%2)")"\n" \
  446. "2:\t"user_lbu("$1", "0(%2)")"\n\t" \
  447. "sll\t%0, 0x8\n\t" \
  448. "or\t%0, $1\n\t" \
  449. "li\t%1, 0\n" \
  450. "3:\t.set\tat\n\t" \
  451. ".insn\n\t" \
  452. ".section\t.fixup,\"ax\"\n\t" \
  453. "4:\tli\t%1, %3\n\t" \
  454. "j\t3b\n\t" \
  455. ".previous\n\t" \
  456. ".section\t__ex_table,\"a\"\n\t" \
  457. STR(PTR)"\t1b, 4b\n\t" \
  458. STR(PTR)"\t2b, 4b\n\t" \
  459. ".previous" \
  460. : "=&r" (value), "=r" (res) \
  461. : "r" (addr), "i" (-EFAULT));
  462. #ifndef CONFIG_CPU_MIPSR6
  463. #define LoadW(addr, value, res) \
  464. __asm__ __volatile__ ( \
  465. "1:\t"user_lwl("%0", "3(%2)")"\n" \
  466. "2:\t"user_lwr("%0", "(%2)")"\n\t" \
  467. "li\t%1, 0\n" \
  468. "3:\n\t" \
  469. ".insn\n\t" \
  470. ".section\t.fixup,\"ax\"\n\t" \
  471. "4:\tli\t%1, %3\n\t" \
  472. "j\t3b\n\t" \
  473. ".previous\n\t" \
  474. ".section\t__ex_table,\"a\"\n\t" \
  475. STR(PTR)"\t1b, 4b\n\t" \
  476. STR(PTR)"\t2b, 4b\n\t" \
  477. ".previous" \
  478. : "=&r" (value), "=r" (res) \
  479. : "r" (addr), "i" (-EFAULT));
  480. #else
  481. /* MIPSR6 has no lwl instruction */
  482. #define LoadW(addr, value, res) \
  483. __asm__ __volatile__ ( \
  484. ".set\tpush\n" \
  485. ".set\tnoat\n\t" \
  486. "1:"user_lb("%0", "3(%2)")"\n\t" \
  487. "2:"user_lbu("$1", "2(%2)")"\n\t" \
  488. "sll\t%0, 0x8\n\t" \
  489. "or\t%0, $1\n\t" \
  490. "3:"user_lbu("$1", "1(%2)")"\n\t" \
  491. "sll\t%0, 0x8\n\t" \
  492. "or\t%0, $1\n\t" \
  493. "4:"user_lbu("$1", "0(%2)")"\n\t" \
  494. "sll\t%0, 0x8\n\t" \
  495. "or\t%0, $1\n\t" \
  496. "li\t%1, 0\n" \
  497. ".set\tpop\n" \
  498. "10:\n\t" \
  499. ".insn\n\t" \
  500. ".section\t.fixup,\"ax\"\n\t" \
  501. "11:\tli\t%1, %3\n\t" \
  502. "j\t10b\n\t" \
  503. ".previous\n\t" \
  504. ".section\t__ex_table,\"a\"\n\t" \
  505. STR(PTR)"\t1b, 11b\n\t" \
  506. STR(PTR)"\t2b, 11b\n\t" \
  507. STR(PTR)"\t3b, 11b\n\t" \
  508. STR(PTR)"\t4b, 11b\n\t" \
  509. ".previous" \
  510. : "=&r" (value), "=r" (res) \
  511. : "r" (addr), "i" (-EFAULT));
  512. #endif /* CONFIG_CPU_MIPSR6 */
  513. #define LoadHWU(addr, value, res) \
  514. __asm__ __volatile__ ( \
  515. ".set\tnoat\n" \
  516. "1:\t"user_lbu("%0", "1(%2)")"\n" \
  517. "2:\t"user_lbu("$1", "0(%2)")"\n\t" \
  518. "sll\t%0, 0x8\n\t" \
  519. "or\t%0, $1\n\t" \
  520. "li\t%1, 0\n" \
  521. "3:\n\t" \
  522. ".insn\n\t" \
  523. ".set\tat\n\t" \
  524. ".section\t.fixup,\"ax\"\n\t" \
  525. "4:\tli\t%1, %3\n\t" \
  526. "j\t3b\n\t" \
  527. ".previous\n\t" \
  528. ".section\t__ex_table,\"a\"\n\t" \
  529. STR(PTR)"\t1b, 4b\n\t" \
  530. STR(PTR)"\t2b, 4b\n\t" \
  531. ".previous" \
  532. : "=&r" (value), "=r" (res) \
  533. : "r" (addr), "i" (-EFAULT));
  534. #ifndef CONFIG_CPU_MIPSR6
  535. #define LoadWU(addr, value, res) \
  536. __asm__ __volatile__ ( \
  537. "1:\t"user_lwl("%0", "3(%2)")"\n" \
  538. "2:\t"user_lwr("%0", "(%2)")"\n\t" \
  539. "dsll\t%0, %0, 32\n\t" \
  540. "dsrl\t%0, %0, 32\n\t" \
  541. "li\t%1, 0\n" \
  542. "3:\n\t" \
  543. ".insn\n\t" \
  544. "\t.section\t.fixup,\"ax\"\n\t" \
  545. "4:\tli\t%1, %3\n\t" \
  546. "j\t3b\n\t" \
  547. ".previous\n\t" \
  548. ".section\t__ex_table,\"a\"\n\t" \
  549. STR(PTR)"\t1b, 4b\n\t" \
  550. STR(PTR)"\t2b, 4b\n\t" \
  551. ".previous" \
  552. : "=&r" (value), "=r" (res) \
  553. : "r" (addr), "i" (-EFAULT));
  554. #define LoadDW(addr, value, res) \
  555. __asm__ __volatile__ ( \
  556. "1:\tldl\t%0, 7(%2)\n" \
  557. "2:\tldr\t%0, (%2)\n\t" \
  558. "li\t%1, 0\n" \
  559. "3:\n\t" \
  560. ".insn\n\t" \
  561. "\t.section\t.fixup,\"ax\"\n\t" \
  562. "4:\tli\t%1, %3\n\t" \
  563. "j\t3b\n\t" \
  564. ".previous\n\t" \
  565. ".section\t__ex_table,\"a\"\n\t" \
  566. STR(PTR)"\t1b, 4b\n\t" \
  567. STR(PTR)"\t2b, 4b\n\t" \
  568. ".previous" \
  569. : "=&r" (value), "=r" (res) \
  570. : "r" (addr), "i" (-EFAULT));
  571. #else
  572. /* MIPSR6 has not lwl and ldl instructions */
  573. #define LoadWU(addr, value, res) \
  574. __asm__ __volatile__ ( \
  575. ".set\tpush\n\t" \
  576. ".set\tnoat\n\t" \
  577. "1:"user_lbu("%0", "3(%2)")"\n\t" \
  578. "2:"user_lbu("$1", "2(%2)")"\n\t" \
  579. "sll\t%0, 0x8\n\t" \
  580. "or\t%0, $1\n\t" \
  581. "3:"user_lbu("$1", "1(%2)")"\n\t" \
  582. "sll\t%0, 0x8\n\t" \
  583. "or\t%0, $1\n\t" \
  584. "4:"user_lbu("$1", "0(%2)")"\n\t" \
  585. "sll\t%0, 0x8\n\t" \
  586. "or\t%0, $1\n\t" \
  587. "li\t%1, 0\n" \
  588. ".set\tpop\n" \
  589. "10:\n\t" \
  590. ".insn\n\t" \
  591. ".section\t.fixup,\"ax\"\n\t" \
  592. "11:\tli\t%1, %3\n\t" \
  593. "j\t10b\n\t" \
  594. ".previous\n\t" \
  595. ".section\t__ex_table,\"a\"\n\t" \
  596. STR(PTR)"\t1b, 11b\n\t" \
  597. STR(PTR)"\t2b, 11b\n\t" \
  598. STR(PTR)"\t3b, 11b\n\t" \
  599. STR(PTR)"\t4b, 11b\n\t" \
  600. ".previous" \
  601. : "=&r" (value), "=r" (res) \
  602. : "r" (addr), "i" (-EFAULT));
  603. #define LoadDW(addr, value, res) \
  604. __asm__ __volatile__ ( \
  605. ".set\tpush\n\t" \
  606. ".set\tnoat\n\t" \
  607. "1:lb\t%0, 7(%2)\n\t" \
  608. "2:lbu\t$1, 6(%2)\n\t" \
  609. "dsll\t%0, 0x8\n\t" \
  610. "or\t%0, $1\n\t" \
  611. "3:lbu\t$1, 5(%2)\n\t" \
  612. "dsll\t%0, 0x8\n\t" \
  613. "or\t%0, $1\n\t" \
  614. "4:lbu\t$1, 4(%2)\n\t" \
  615. "dsll\t%0, 0x8\n\t" \
  616. "or\t%0, $1\n\t" \
  617. "5:lbu\t$1, 3(%2)\n\t" \
  618. "dsll\t%0, 0x8\n\t" \
  619. "or\t%0, $1\n\t" \
  620. "6:lbu\t$1, 2(%2)\n\t" \
  621. "dsll\t%0, 0x8\n\t" \
  622. "or\t%0, $1\n\t" \
  623. "7:lbu\t$1, 1(%2)\n\t" \
  624. "dsll\t%0, 0x8\n\t" \
  625. "or\t%0, $1\n\t" \
  626. "8:lbu\t$1, 0(%2)\n\t" \
  627. "dsll\t%0, 0x8\n\t" \
  628. "or\t%0, $1\n\t" \
  629. "li\t%1, 0\n" \
  630. ".set\tpop\n\t" \
  631. "10:\n\t" \
  632. ".insn\n\t" \
  633. ".section\t.fixup,\"ax\"\n\t" \
  634. "11:\tli\t%1, %3\n\t" \
  635. "j\t10b\n\t" \
  636. ".previous\n\t" \
  637. ".section\t__ex_table,\"a\"\n\t" \
  638. STR(PTR)"\t1b, 11b\n\t" \
  639. STR(PTR)"\t2b, 11b\n\t" \
  640. STR(PTR)"\t3b, 11b\n\t" \
  641. STR(PTR)"\t4b, 11b\n\t" \
  642. STR(PTR)"\t5b, 11b\n\t" \
  643. STR(PTR)"\t6b, 11b\n\t" \
  644. STR(PTR)"\t7b, 11b\n\t" \
  645. STR(PTR)"\t8b, 11b\n\t" \
  646. ".previous" \
  647. : "=&r" (value), "=r" (res) \
  648. : "r" (addr), "i" (-EFAULT));
  649. #endif /* CONFIG_CPU_MIPSR6 */
  650. #define StoreHW(addr, value, res) \
  651. __asm__ __volatile__ ( \
  652. ".set\tnoat\n" \
  653. "1:\t"user_sb("%1", "0(%2)")"\n" \
  654. "srl\t$1,%1, 0x8\n" \
  655. "2:\t"user_sb("$1", "1(%2)")"\n" \
  656. ".set\tat\n\t" \
  657. "li\t%0, 0\n" \
  658. "3:\n\t" \
  659. ".insn\n\t" \
  660. ".section\t.fixup,\"ax\"\n\t" \
  661. "4:\tli\t%0, %3\n\t" \
  662. "j\t3b\n\t" \
  663. ".previous\n\t" \
  664. ".section\t__ex_table,\"a\"\n\t" \
  665. STR(PTR)"\t1b, 4b\n\t" \
  666. STR(PTR)"\t2b, 4b\n\t" \
  667. ".previous" \
  668. : "=r" (res) \
  669. : "r" (value), "r" (addr), "i" (-EFAULT));
  670. #ifndef CONFIG_CPU_MIPSR6
  671. #define StoreW(addr, value, res) \
  672. __asm__ __volatile__ ( \
  673. "1:\t"user_swl("%1", "3(%2)")"\n" \
  674. "2:\t"user_swr("%1", "(%2)")"\n\t" \
  675. "li\t%0, 0\n" \
  676. "3:\n\t" \
  677. ".insn\n\t" \
  678. ".section\t.fixup,\"ax\"\n\t" \
  679. "4:\tli\t%0, %3\n\t" \
  680. "j\t3b\n\t" \
  681. ".previous\n\t" \
  682. ".section\t__ex_table,\"a\"\n\t" \
  683. STR(PTR)"\t1b, 4b\n\t" \
  684. STR(PTR)"\t2b, 4b\n\t" \
  685. ".previous" \
  686. : "=r" (res) \
  687. : "r" (value), "r" (addr), "i" (-EFAULT));
  688. #define StoreDW(addr, value, res) \
  689. __asm__ __volatile__ ( \
  690. "1:\tsdl\t%1, 7(%2)\n" \
  691. "2:\tsdr\t%1, (%2)\n\t" \
  692. "li\t%0, 0\n" \
  693. "3:\n\t" \
  694. ".insn\n\t" \
  695. ".section\t.fixup,\"ax\"\n\t" \
  696. "4:\tli\t%0, %3\n\t" \
  697. "j\t3b\n\t" \
  698. ".previous\n\t" \
  699. ".section\t__ex_table,\"a\"\n\t" \
  700. STR(PTR)"\t1b, 4b\n\t" \
  701. STR(PTR)"\t2b, 4b\n\t" \
  702. ".previous" \
  703. : "=r" (res) \
  704. : "r" (value), "r" (addr), "i" (-EFAULT));
  705. #else
  706. /* MIPSR6 has no swl and sdl instructions */
  707. #define StoreW(addr, value, res) \
  708. __asm__ __volatile__ ( \
  709. ".set\tpush\n\t" \
  710. ".set\tnoat\n\t" \
  711. "1:"user_sb("%1", "0(%2)")"\n\t" \
  712. "srl\t$1, %1, 0x8\n\t" \
  713. "2:"user_sb("$1", "1(%2)")"\n\t" \
  714. "srl\t$1, $1, 0x8\n\t" \
  715. "3:"user_sb("$1", "2(%2)")"\n\t" \
  716. "srl\t$1, $1, 0x8\n\t" \
  717. "4:"user_sb("$1", "3(%2)")"\n\t" \
  718. ".set\tpop\n\t" \
  719. "li\t%0, 0\n" \
  720. "10:\n\t" \
  721. ".insn\n\t" \
  722. ".section\t.fixup,\"ax\"\n\t" \
  723. "11:\tli\t%0, %3\n\t" \
  724. "j\t10b\n\t" \
  725. ".previous\n\t" \
  726. ".section\t__ex_table,\"a\"\n\t" \
  727. STR(PTR)"\t1b, 11b\n\t" \
  728. STR(PTR)"\t2b, 11b\n\t" \
  729. STR(PTR)"\t3b, 11b\n\t" \
  730. STR(PTR)"\t4b, 11b\n\t" \
  731. ".previous" \
  732. : "=&r" (res) \
  733. : "r" (value), "r" (addr), "i" (-EFAULT) \
  734. : "memory");
  735. #define StoreDW(addr, value, res) \
  736. __asm__ __volatile__ ( \
  737. ".set\tpush\n\t" \
  738. ".set\tnoat\n\t" \
  739. "1:sb\t%1, 0(%2)\n\t" \
  740. "dsrl\t$1, %1, 0x8\n\t" \
  741. "2:sb\t$1, 1(%2)\n\t" \
  742. "dsrl\t$1, $1, 0x8\n\t" \
  743. "3:sb\t$1, 2(%2)\n\t" \
  744. "dsrl\t$1, $1, 0x8\n\t" \
  745. "4:sb\t$1, 3(%2)\n\t" \
  746. "dsrl\t$1, $1, 0x8\n\t" \
  747. "5:sb\t$1, 4(%2)\n\t" \
  748. "dsrl\t$1, $1, 0x8\n\t" \
  749. "6:sb\t$1, 5(%2)\n\t" \
  750. "dsrl\t$1, $1, 0x8\n\t" \
  751. "7:sb\t$1, 6(%2)\n\t" \
  752. "dsrl\t$1, $1, 0x8\n\t" \
  753. "8:sb\t$1, 7(%2)\n\t" \
  754. "dsrl\t$1, $1, 0x8\n\t" \
  755. ".set\tpop\n\t" \
  756. "li\t%0, 0\n" \
  757. "10:\n\t" \
  758. ".insn\n\t" \
  759. ".section\t.fixup,\"ax\"\n\t" \
  760. "11:\tli\t%0, %3\n\t" \
  761. "j\t10b\n\t" \
  762. ".previous\n\t" \
  763. ".section\t__ex_table,\"a\"\n\t" \
  764. STR(PTR)"\t1b, 11b\n\t" \
  765. STR(PTR)"\t2b, 11b\n\t" \
  766. STR(PTR)"\t3b, 11b\n\t" \
  767. STR(PTR)"\t4b, 11b\n\t" \
  768. STR(PTR)"\t5b, 11b\n\t" \
  769. STR(PTR)"\t6b, 11b\n\t" \
  770. STR(PTR)"\t7b, 11b\n\t" \
  771. STR(PTR)"\t8b, 11b\n\t" \
  772. ".previous" \
  773. : "=&r" (res) \
  774. : "r" (value), "r" (addr), "i" (-EFAULT) \
  775. : "memory");
  776. #endif /* CONFIG_CPU_MIPSR6 */
  777. #endif
  778. static void emulate_load_store_insn(struct pt_regs *regs,
  779. void __user *addr, unsigned int __user *pc)
  780. {
  781. union mips_instruction insn;
  782. unsigned long value;
  783. unsigned int res;
  784. unsigned long origpc;
  785. unsigned long orig31;
  786. void __user *fault_addr = NULL;
  787. #ifdef CONFIG_EVA
  788. mm_segment_t seg;
  789. #endif
  790. origpc = (unsigned long)pc;
  791. orig31 = regs->regs[31];
  792. perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
  793. /*
  794. * This load never faults.
  795. */
  796. __get_user(insn.word, pc);
  797. switch (insn.i_format.opcode) {
  798. /*
  799. * These are instructions that a compiler doesn't generate. We
  800. * can assume therefore that the code is MIPS-aware and
  801. * really buggy. Emulating these instructions would break the
  802. * semantics anyway.
  803. */
  804. case ll_op:
  805. case lld_op:
  806. case sc_op:
  807. case scd_op:
  808. /*
  809. * For these instructions the only way to create an address
  810. * error is an attempted access to kernel/supervisor address
  811. * space.
  812. */
  813. case ldl_op:
  814. case ldr_op:
  815. case lwl_op:
  816. case lwr_op:
  817. case sdl_op:
  818. case sdr_op:
  819. case swl_op:
  820. case swr_op:
  821. case lb_op:
  822. case lbu_op:
  823. case sb_op:
  824. goto sigbus;
  825. /*
  826. * The remaining opcodes are the ones that are really of
  827. * interest.
  828. */
  829. #ifdef CONFIG_EVA
  830. case spec3_op:
  831. /*
  832. * we can land here only from kernel accessing user memory,
  833. * so we need to "switch" the address limit to user space, so
  834. * address check can work properly.
  835. */
  836. seg = get_fs();
  837. set_fs(USER_DS);
  838. switch (insn.spec3_format.func) {
  839. case lhe_op:
  840. if (!access_ok(VERIFY_READ, addr, 2)) {
  841. set_fs(seg);
  842. goto sigbus;
  843. }
  844. LoadHW(addr, value, res);
  845. if (res) {
  846. set_fs(seg);
  847. goto fault;
  848. }
  849. compute_return_epc(regs);
  850. regs->regs[insn.spec3_format.rt] = value;
  851. break;
  852. case lwe_op:
  853. if (!access_ok(VERIFY_READ, addr, 4)) {
  854. set_fs(seg);
  855. goto sigbus;
  856. }
  857. LoadW(addr, value, res);
  858. if (res) {
  859. set_fs(seg);
  860. goto fault;
  861. }
  862. compute_return_epc(regs);
  863. regs->regs[insn.spec3_format.rt] = value;
  864. break;
  865. case lhue_op:
  866. if (!access_ok(VERIFY_READ, addr, 2)) {
  867. set_fs(seg);
  868. goto sigbus;
  869. }
  870. LoadHWU(addr, value, res);
  871. if (res) {
  872. set_fs(seg);
  873. goto fault;
  874. }
  875. compute_return_epc(regs);
  876. regs->regs[insn.spec3_format.rt] = value;
  877. break;
  878. case she_op:
  879. if (!access_ok(VERIFY_WRITE, addr, 2)) {
  880. set_fs(seg);
  881. goto sigbus;
  882. }
  883. compute_return_epc(regs);
  884. value = regs->regs[insn.spec3_format.rt];
  885. StoreHW(addr, value, res);
  886. if (res) {
  887. set_fs(seg);
  888. goto fault;
  889. }
  890. break;
  891. case swe_op:
  892. if (!access_ok(VERIFY_WRITE, addr, 4)) {
  893. set_fs(seg);
  894. goto sigbus;
  895. }
  896. compute_return_epc(regs);
  897. value = regs->regs[insn.spec3_format.rt];
  898. StoreW(addr, value, res);
  899. if (res) {
  900. set_fs(seg);
  901. goto fault;
  902. }
  903. break;
  904. default:
  905. set_fs(seg);
  906. goto sigill;
  907. }
  908. set_fs(seg);
  909. break;
  910. #endif
  911. case lh_op:
  912. if (!access_ok(VERIFY_READ, addr, 2))
  913. goto sigbus;
  914. LoadHW(addr, value, res);
  915. if (res)
  916. goto fault;
  917. compute_return_epc(regs);
  918. regs->regs[insn.i_format.rt] = value;
  919. break;
  920. case lw_op:
  921. if (!access_ok(VERIFY_READ, addr, 4))
  922. goto sigbus;
  923. LoadW(addr, value, res);
  924. if (res)
  925. goto fault;
  926. compute_return_epc(regs);
  927. regs->regs[insn.i_format.rt] = value;
  928. break;
  929. case lhu_op:
  930. if (!access_ok(VERIFY_READ, addr, 2))
  931. goto sigbus;
  932. LoadHWU(addr, value, res);
  933. if (res)
  934. goto fault;
  935. compute_return_epc(regs);
  936. regs->regs[insn.i_format.rt] = value;
  937. break;
  938. case lwu_op:
  939. #ifdef CONFIG_64BIT
  940. /*
  941. * A 32-bit kernel might be running on a 64-bit processor. But
  942. * if we're on a 32-bit processor and an i-cache incoherency
  943. * or race makes us see a 64-bit instruction here the sdl/sdr
  944. * would blow up, so for now we don't handle unaligned 64-bit
  945. * instructions on 32-bit kernels.
  946. */
  947. if (!access_ok(VERIFY_READ, addr, 4))
  948. goto sigbus;
  949. LoadWU(addr, value, res);
  950. if (res)
  951. goto fault;
  952. compute_return_epc(regs);
  953. regs->regs[insn.i_format.rt] = value;
  954. break;
  955. #endif /* CONFIG_64BIT */
  956. /* Cannot handle 64-bit instructions in 32-bit kernel */
  957. goto sigill;
  958. case ld_op:
  959. #ifdef CONFIG_64BIT
  960. /*
  961. * A 32-bit kernel might be running on a 64-bit processor. But
  962. * if we're on a 32-bit processor and an i-cache incoherency
  963. * or race makes us see a 64-bit instruction here the sdl/sdr
  964. * would blow up, so for now we don't handle unaligned 64-bit
  965. * instructions on 32-bit kernels.
  966. */
  967. if (!access_ok(VERIFY_READ, addr, 8))
  968. goto sigbus;
  969. LoadDW(addr, value, res);
  970. if (res)
  971. goto fault;
  972. compute_return_epc(regs);
  973. regs->regs[insn.i_format.rt] = value;
  974. break;
  975. #endif /* CONFIG_64BIT */
  976. /* Cannot handle 64-bit instructions in 32-bit kernel */
  977. goto sigill;
  978. case sh_op:
  979. if (!access_ok(VERIFY_WRITE, addr, 2))
  980. goto sigbus;
  981. compute_return_epc(regs);
  982. value = regs->regs[insn.i_format.rt];
  983. StoreHW(addr, value, res);
  984. if (res)
  985. goto fault;
  986. break;
  987. case sw_op:
  988. if (!access_ok(VERIFY_WRITE, addr, 4))
  989. goto sigbus;
  990. compute_return_epc(regs);
  991. value = regs->regs[insn.i_format.rt];
  992. StoreW(addr, value, res);
  993. if (res)
  994. goto fault;
  995. break;
  996. case sd_op:
  997. #ifdef CONFIG_64BIT
  998. /*
  999. * A 32-bit kernel might be running on a 64-bit processor. But
  1000. * if we're on a 32-bit processor and an i-cache incoherency
  1001. * or race makes us see a 64-bit instruction here the sdl/sdr
  1002. * would blow up, so for now we don't handle unaligned 64-bit
  1003. * instructions on 32-bit kernels.
  1004. */
  1005. if (!access_ok(VERIFY_WRITE, addr, 8))
  1006. goto sigbus;
  1007. compute_return_epc(regs);
  1008. value = regs->regs[insn.i_format.rt];
  1009. StoreDW(addr, value, res);
  1010. if (res)
  1011. goto fault;
  1012. break;
  1013. #endif /* CONFIG_64BIT */
  1014. /* Cannot handle 64-bit instructions in 32-bit kernel */
  1015. goto sigill;
  1016. case lwc1_op:
  1017. case ldc1_op:
  1018. case swc1_op:
  1019. case sdc1_op:
  1020. die_if_kernel("Unaligned FP access in kernel code", regs);
  1021. BUG_ON(!used_math());
  1022. lose_fpu(1); /* Save FPU state for the emulator. */
  1023. res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
  1024. &fault_addr);
  1025. own_fpu(1); /* Restore FPU state. */
  1026. /* Signal if something went wrong. */
  1027. process_fpemu_return(res, fault_addr);
  1028. if (res == 0)
  1029. break;
  1030. return;
  1031. #ifndef CONFIG_CPU_MIPSR6
  1032. /*
  1033. * COP2 is available to implementor for application specific use.
  1034. * It's up to applications to register a notifier chain and do
  1035. * whatever they have to do, including possible sending of signals.
  1036. *
  1037. * This instruction has been reallocated in Release 6
  1038. */
  1039. case lwc2_op:
  1040. cu2_notifier_call_chain(CU2_LWC2_OP, regs);
  1041. break;
  1042. case ldc2_op:
  1043. cu2_notifier_call_chain(CU2_LDC2_OP, regs);
  1044. break;
  1045. case swc2_op:
  1046. cu2_notifier_call_chain(CU2_SWC2_OP, regs);
  1047. break;
  1048. case sdc2_op:
  1049. cu2_notifier_call_chain(CU2_SDC2_OP, regs);
  1050. break;
  1051. #endif
  1052. default:
  1053. /*
  1054. * Pheeee... We encountered an yet unknown instruction or
  1055. * cache coherence problem. Die sucker, die ...
  1056. */
  1057. goto sigill;
  1058. }
  1059. #ifdef CONFIG_DEBUG_FS
  1060. unaligned_instructions++;
  1061. #endif
  1062. return;
  1063. fault:
  1064. /* roll back jump/branch */
  1065. regs->cp0_epc = origpc;
  1066. regs->regs[31] = orig31;
  1067. /* Did we have an exception handler installed? */
  1068. if (fixup_exception(regs))
  1069. return;
  1070. die_if_kernel("Unhandled kernel unaligned access", regs);
  1071. force_sig(SIGSEGV, current);
  1072. return;
  1073. sigbus:
  1074. die_if_kernel("Unhandled kernel unaligned access", regs);
  1075. force_sig(SIGBUS, current);
  1076. return;
  1077. sigill:
  1078. die_if_kernel
  1079. ("Unhandled kernel unaligned access or invalid instruction", regs);
  1080. force_sig(SIGILL, current);
  1081. }
  1082. /* Recode table from 16-bit register notation to 32-bit GPR. */
  1083. const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 };
  1084. /* Recode table from 16-bit STORE register notation to 32-bit GPR. */
  1085. const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 };
  1086. static void emulate_load_store_microMIPS(struct pt_regs *regs,
  1087. void __user *addr)
  1088. {
  1089. unsigned long value;
  1090. unsigned int res;
  1091. int i;
  1092. unsigned int reg = 0, rvar;
  1093. unsigned long orig31;
  1094. u16 __user *pc16;
  1095. u16 halfword;
  1096. unsigned int word;
  1097. unsigned long origpc, contpc;
  1098. union mips_instruction insn;
  1099. struct mm_decoded_insn mminsn;
  1100. void __user *fault_addr = NULL;
  1101. origpc = regs->cp0_epc;
  1102. orig31 = regs->regs[31];
  1103. mminsn.micro_mips_mode = 1;
  1104. /*
  1105. * This load never faults.
  1106. */
  1107. pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
  1108. __get_user(halfword, pc16);
  1109. pc16++;
  1110. contpc = regs->cp0_epc + 2;
  1111. word = ((unsigned int)halfword << 16);
  1112. mminsn.pc_inc = 2;
  1113. if (!mm_insn_16bit(halfword)) {
  1114. __get_user(halfword, pc16);
  1115. pc16++;
  1116. contpc = regs->cp0_epc + 4;
  1117. mminsn.pc_inc = 4;
  1118. word |= halfword;
  1119. }
  1120. mminsn.insn = word;
  1121. if (get_user(halfword, pc16))
  1122. goto fault;
  1123. mminsn.next_pc_inc = 2;
  1124. word = ((unsigned int)halfword << 16);
  1125. if (!mm_insn_16bit(halfword)) {
  1126. pc16++;
  1127. if (get_user(halfword, pc16))
  1128. goto fault;
  1129. mminsn.next_pc_inc = 4;
  1130. word |= halfword;
  1131. }
  1132. mminsn.next_insn = word;
  1133. insn = (union mips_instruction)(mminsn.insn);
  1134. if (mm_isBranchInstr(regs, mminsn, &contpc))
  1135. insn = (union mips_instruction)(mminsn.next_insn);
  1136. /* Parse instruction to find what to do */
  1137. switch (insn.mm_i_format.opcode) {
  1138. case mm_pool32a_op:
  1139. switch (insn.mm_x_format.func) {
  1140. case mm_lwxs_op:
  1141. reg = insn.mm_x_format.rd;
  1142. goto loadW;
  1143. }
  1144. goto sigbus;
  1145. case mm_pool32b_op:
  1146. switch (insn.mm_m_format.func) {
  1147. case mm_lwp_func:
  1148. reg = insn.mm_m_format.rd;
  1149. if (reg == 31)
  1150. goto sigbus;
  1151. if (!access_ok(VERIFY_READ, addr, 8))
  1152. goto sigbus;
  1153. LoadW(addr, value, res);
  1154. if (res)
  1155. goto fault;
  1156. regs->regs[reg] = value;
  1157. addr += 4;
  1158. LoadW(addr, value, res);
  1159. if (res)
  1160. goto fault;
  1161. regs->regs[reg + 1] = value;
  1162. goto success;
  1163. case mm_swp_func:
  1164. reg = insn.mm_m_format.rd;
  1165. if (reg == 31)
  1166. goto sigbus;
  1167. if (!access_ok(VERIFY_WRITE, addr, 8))
  1168. goto sigbus;
  1169. value = regs->regs[reg];
  1170. StoreW(addr, value, res);
  1171. if (res)
  1172. goto fault;
  1173. addr += 4;
  1174. value = regs->regs[reg + 1];
  1175. StoreW(addr, value, res);
  1176. if (res)
  1177. goto fault;
  1178. goto success;
  1179. case mm_ldp_func:
  1180. #ifdef CONFIG_64BIT
  1181. reg = insn.mm_m_format.rd;
  1182. if (reg == 31)
  1183. goto sigbus;
  1184. if (!access_ok(VERIFY_READ, addr, 16))
  1185. goto sigbus;
  1186. LoadDW(addr, value, res);
  1187. if (res)
  1188. goto fault;
  1189. regs->regs[reg] = value;
  1190. addr += 8;
  1191. LoadDW(addr, value, res);
  1192. if (res)
  1193. goto fault;
  1194. regs->regs[reg + 1] = value;
  1195. goto success;
  1196. #endif /* CONFIG_64BIT */
  1197. goto sigill;
  1198. case mm_sdp_func:
  1199. #ifdef CONFIG_64BIT
  1200. reg = insn.mm_m_format.rd;
  1201. if (reg == 31)
  1202. goto sigbus;
  1203. if (!access_ok(VERIFY_WRITE, addr, 16))
  1204. goto sigbus;
  1205. value = regs->regs[reg];
  1206. StoreDW(addr, value, res);
  1207. if (res)
  1208. goto fault;
  1209. addr += 8;
  1210. value = regs->regs[reg + 1];
  1211. StoreDW(addr, value, res);
  1212. if (res)
  1213. goto fault;
  1214. goto success;
  1215. #endif /* CONFIG_64BIT */
  1216. goto sigill;
  1217. case mm_lwm32_func:
  1218. reg = insn.mm_m_format.rd;
  1219. rvar = reg & 0xf;
  1220. if ((rvar > 9) || !reg)
  1221. goto sigill;
  1222. if (reg & 0x10) {
  1223. if (!access_ok
  1224. (VERIFY_READ, addr, 4 * (rvar + 1)))
  1225. goto sigbus;
  1226. } else {
  1227. if (!access_ok(VERIFY_READ, addr, 4 * rvar))
  1228. goto sigbus;
  1229. }
  1230. if (rvar == 9)
  1231. rvar = 8;
  1232. for (i = 16; rvar; rvar--, i++) {
  1233. LoadW(addr, value, res);
  1234. if (res)
  1235. goto fault;
  1236. addr += 4;
  1237. regs->regs[i] = value;
  1238. }
  1239. if ((reg & 0xf) == 9) {
  1240. LoadW(addr, value, res);
  1241. if (res)
  1242. goto fault;
  1243. addr += 4;
  1244. regs->regs[30] = value;
  1245. }
  1246. if (reg & 0x10) {
  1247. LoadW(addr, value, res);
  1248. if (res)
  1249. goto fault;
  1250. regs->regs[31] = value;
  1251. }
  1252. goto success;
  1253. case mm_swm32_func:
  1254. reg = insn.mm_m_format.rd;
  1255. rvar = reg & 0xf;
  1256. if ((rvar > 9) || !reg)
  1257. goto sigill;
  1258. if (reg & 0x10) {
  1259. if (!access_ok
  1260. (VERIFY_WRITE, addr, 4 * (rvar + 1)))
  1261. goto sigbus;
  1262. } else {
  1263. if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
  1264. goto sigbus;
  1265. }
  1266. if (rvar == 9)
  1267. rvar = 8;
  1268. for (i = 16; rvar; rvar--, i++) {
  1269. value = regs->regs[i];
  1270. StoreW(addr, value, res);
  1271. if (res)
  1272. goto fault;
  1273. addr += 4;
  1274. }
  1275. if ((reg & 0xf) == 9) {
  1276. value = regs->regs[30];
  1277. StoreW(addr, value, res);
  1278. if (res)
  1279. goto fault;
  1280. addr += 4;
  1281. }
  1282. if (reg & 0x10) {
  1283. value = regs->regs[31];
  1284. StoreW(addr, value, res);
  1285. if (res)
  1286. goto fault;
  1287. }
  1288. goto success;
  1289. case mm_ldm_func:
  1290. #ifdef CONFIG_64BIT
  1291. reg = insn.mm_m_format.rd;
  1292. rvar = reg & 0xf;
  1293. if ((rvar > 9) || !reg)
  1294. goto sigill;
  1295. if (reg & 0x10) {
  1296. if (!access_ok
  1297. (VERIFY_READ, addr, 8 * (rvar + 1)))
  1298. goto sigbus;
  1299. } else {
  1300. if (!access_ok(VERIFY_READ, addr, 8 * rvar))
  1301. goto sigbus;
  1302. }
  1303. if (rvar == 9)
  1304. rvar = 8;
  1305. for (i = 16; rvar; rvar--, i++) {
  1306. LoadDW(addr, value, res);
  1307. if (res)
  1308. goto fault;
  1309. addr += 4;
  1310. regs->regs[i] = value;
  1311. }
  1312. if ((reg & 0xf) == 9) {
  1313. LoadDW(addr, value, res);
  1314. if (res)
  1315. goto fault;
  1316. addr += 8;
  1317. regs->regs[30] = value;
  1318. }
  1319. if (reg & 0x10) {
  1320. LoadDW(addr, value, res);
  1321. if (res)
  1322. goto fault;
  1323. regs->regs[31] = value;
  1324. }
  1325. goto success;
  1326. #endif /* CONFIG_64BIT */
  1327. goto sigill;
  1328. case mm_sdm_func:
  1329. #ifdef CONFIG_64BIT
  1330. reg = insn.mm_m_format.rd;
  1331. rvar = reg & 0xf;
  1332. if ((rvar > 9) || !reg)
  1333. goto sigill;
  1334. if (reg & 0x10) {
  1335. if (!access_ok
  1336. (VERIFY_WRITE, addr, 8 * (rvar + 1)))
  1337. goto sigbus;
  1338. } else {
  1339. if (!access_ok(VERIFY_WRITE, addr, 8 * rvar))
  1340. goto sigbus;
  1341. }
  1342. if (rvar == 9)
  1343. rvar = 8;
  1344. for (i = 16; rvar; rvar--, i++) {
  1345. value = regs->regs[i];
  1346. StoreDW(addr, value, res);
  1347. if (res)
  1348. goto fault;
  1349. addr += 8;
  1350. }
  1351. if ((reg & 0xf) == 9) {
  1352. value = regs->regs[30];
  1353. StoreDW(addr, value, res);
  1354. if (res)
  1355. goto fault;
  1356. addr += 8;
  1357. }
  1358. if (reg & 0x10) {
  1359. value = regs->regs[31];
  1360. StoreDW(addr, value, res);
  1361. if (res)
  1362. goto fault;
  1363. }
  1364. goto success;
  1365. #endif /* CONFIG_64BIT */
  1366. goto sigill;
  1367. /* LWC2, SWC2, LDC2, SDC2 are not serviced */
  1368. }
  1369. goto sigbus;
  1370. case mm_pool32c_op:
  1371. switch (insn.mm_m_format.func) {
  1372. case mm_lwu_func:
  1373. reg = insn.mm_m_format.rd;
  1374. goto loadWU;
  1375. }
  1376. /* LL,SC,LLD,SCD are not serviced */
  1377. goto sigbus;
  1378. case mm_pool32f_op:
  1379. switch (insn.mm_x_format.func) {
  1380. case mm_lwxc1_func:
  1381. case mm_swxc1_func:
  1382. case mm_ldxc1_func:
  1383. case mm_sdxc1_func:
  1384. goto fpu_emul;
  1385. }
  1386. goto sigbus;
  1387. case mm_ldc132_op:
  1388. case mm_sdc132_op:
  1389. case mm_lwc132_op:
  1390. case mm_swc132_op:
  1391. fpu_emul:
  1392. /* roll back jump/branch */
  1393. regs->cp0_epc = origpc;
  1394. regs->regs[31] = orig31;
  1395. die_if_kernel("Unaligned FP access in kernel code", regs);
  1396. BUG_ON(!used_math());
  1397. BUG_ON(!is_fpu_owner());
  1398. lose_fpu(1); /* save the FPU state for the emulator */
  1399. res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
  1400. &fault_addr);
  1401. own_fpu(1); /* restore FPU state */
  1402. /* If something went wrong, signal */
  1403. process_fpemu_return(res, fault_addr);
  1404. if (res == 0)
  1405. goto success;
  1406. return;
  1407. case mm_lh32_op:
  1408. reg = insn.mm_i_format.rt;
  1409. goto loadHW;
  1410. case mm_lhu32_op:
  1411. reg = insn.mm_i_format.rt;
  1412. goto loadHWU;
  1413. case mm_lw32_op:
  1414. reg = insn.mm_i_format.rt;
  1415. goto loadW;
  1416. case mm_sh32_op:
  1417. reg = insn.mm_i_format.rt;
  1418. goto storeHW;
  1419. case mm_sw32_op:
  1420. reg = insn.mm_i_format.rt;
  1421. goto storeW;
  1422. case mm_ld32_op:
  1423. reg = insn.mm_i_format.rt;
  1424. goto loadDW;
  1425. case mm_sd32_op:
  1426. reg = insn.mm_i_format.rt;
  1427. goto storeDW;
  1428. case mm_pool16c_op:
  1429. switch (insn.mm16_m_format.func) {
  1430. case mm_lwm16_op:
  1431. reg = insn.mm16_m_format.rlist;
  1432. rvar = reg + 1;
  1433. if (!access_ok(VERIFY_READ, addr, 4 * rvar))
  1434. goto sigbus;
  1435. for (i = 16; rvar; rvar--, i++) {
  1436. LoadW(addr, value, res);
  1437. if (res)
  1438. goto fault;
  1439. addr += 4;
  1440. regs->regs[i] = value;
  1441. }
  1442. LoadW(addr, value, res);
  1443. if (res)
  1444. goto fault;
  1445. regs->regs[31] = value;
  1446. goto success;
  1447. case mm_swm16_op:
  1448. reg = insn.mm16_m_format.rlist;
  1449. rvar = reg + 1;
  1450. if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
  1451. goto sigbus;
  1452. for (i = 16; rvar; rvar--, i++) {
  1453. value = regs->regs[i];
  1454. StoreW(addr, value, res);
  1455. if (res)
  1456. goto fault;
  1457. addr += 4;
  1458. }
  1459. value = regs->regs[31];
  1460. StoreW(addr, value, res);
  1461. if (res)
  1462. goto fault;
  1463. goto success;
  1464. }
  1465. goto sigbus;
  1466. case mm_lhu16_op:
  1467. reg = reg16to32[insn.mm16_rb_format.rt];
  1468. goto loadHWU;
  1469. case mm_lw16_op:
  1470. reg = reg16to32[insn.mm16_rb_format.rt];
  1471. goto loadW;
  1472. case mm_sh16_op:
  1473. reg = reg16to32st[insn.mm16_rb_format.rt];
  1474. goto storeHW;
  1475. case mm_sw16_op:
  1476. reg = reg16to32st[insn.mm16_rb_format.rt];
  1477. goto storeW;
  1478. case mm_lwsp16_op:
  1479. reg = insn.mm16_r5_format.rt;
  1480. goto loadW;
  1481. case mm_swsp16_op:
  1482. reg = insn.mm16_r5_format.rt;
  1483. goto storeW;
  1484. case mm_lwgp16_op:
  1485. reg = reg16to32[insn.mm16_r3_format.rt];
  1486. goto loadW;
  1487. default:
  1488. goto sigill;
  1489. }
  1490. loadHW:
  1491. if (!access_ok(VERIFY_READ, addr, 2))
  1492. goto sigbus;
  1493. LoadHW(addr, value, res);
  1494. if (res)
  1495. goto fault;
  1496. regs->regs[reg] = value;
  1497. goto success;
  1498. loadHWU:
  1499. if (!access_ok(VERIFY_READ, addr, 2))
  1500. goto sigbus;
  1501. LoadHWU(addr, value, res);
  1502. if (res)
  1503. goto fault;
  1504. regs->regs[reg] = value;
  1505. goto success;
  1506. loadW:
  1507. if (!access_ok(VERIFY_READ, addr, 4))
  1508. goto sigbus;
  1509. LoadW(addr, value, res);
  1510. if (res)
  1511. goto fault;
  1512. regs->regs[reg] = value;
  1513. goto success;
  1514. loadWU:
  1515. #ifdef CONFIG_64BIT
  1516. /*
  1517. * A 32-bit kernel might be running on a 64-bit processor. But
  1518. * if we're on a 32-bit processor and an i-cache incoherency
  1519. * or race makes us see a 64-bit instruction here the sdl/sdr
  1520. * would blow up, so for now we don't handle unaligned 64-bit
  1521. * instructions on 32-bit kernels.
  1522. */
  1523. if (!access_ok(VERIFY_READ, addr, 4))
  1524. goto sigbus;
  1525. LoadWU(addr, value, res);
  1526. if (res)
  1527. goto fault;
  1528. regs->regs[reg] = value;
  1529. goto success;
  1530. #endif /* CONFIG_64BIT */
  1531. /* Cannot handle 64-bit instructions in 32-bit kernel */
  1532. goto sigill;
  1533. loadDW:
  1534. #ifdef CONFIG_64BIT
  1535. /*
  1536. * A 32-bit kernel might be running on a 64-bit processor. But
  1537. * if we're on a 32-bit processor and an i-cache incoherency
  1538. * or race makes us see a 64-bit instruction here the sdl/sdr
  1539. * would blow up, so for now we don't handle unaligned 64-bit
  1540. * instructions on 32-bit kernels.
  1541. */
  1542. if (!access_ok(VERIFY_READ, addr, 8))
  1543. goto sigbus;
  1544. LoadDW(addr, value, res);
  1545. if (res)
  1546. goto fault;
  1547. regs->regs[reg] = value;
  1548. goto success;
  1549. #endif /* CONFIG_64BIT */
  1550. /* Cannot handle 64-bit instructions in 32-bit kernel */
  1551. goto sigill;
  1552. storeHW:
  1553. if (!access_ok(VERIFY_WRITE, addr, 2))
  1554. goto sigbus;
  1555. value = regs->regs[reg];
  1556. StoreHW(addr, value, res);
  1557. if (res)
  1558. goto fault;
  1559. goto success;
  1560. storeW:
  1561. if (!access_ok(VERIFY_WRITE, addr, 4))
  1562. goto sigbus;
  1563. value = regs->regs[reg];
  1564. StoreW(addr, value, res);
  1565. if (res)
  1566. goto fault;
  1567. goto success;
  1568. storeDW:
  1569. #ifdef CONFIG_64BIT
  1570. /*
  1571. * A 32-bit kernel might be running on a 64-bit processor. But
  1572. * if we're on a 32-bit processor and an i-cache incoherency
  1573. * or race makes us see a 64-bit instruction here the sdl/sdr
  1574. * would blow up, so for now we don't handle unaligned 64-bit
  1575. * instructions on 32-bit kernels.
  1576. */
  1577. if (!access_ok(VERIFY_WRITE, addr, 8))
  1578. goto sigbus;
  1579. value = regs->regs[reg];
  1580. StoreDW(addr, value, res);
  1581. if (res)
  1582. goto fault;
  1583. goto success;
  1584. #endif /* CONFIG_64BIT */
  1585. /* Cannot handle 64-bit instructions in 32-bit kernel */
  1586. goto sigill;
  1587. success:
  1588. regs->cp0_epc = contpc; /* advance or branch */
  1589. #ifdef CONFIG_DEBUG_FS
  1590. unaligned_instructions++;
  1591. #endif
  1592. return;
  1593. fault:
  1594. /* roll back jump/branch */
  1595. regs->cp0_epc = origpc;
  1596. regs->regs[31] = orig31;
  1597. /* Did we have an exception handler installed? */
  1598. if (fixup_exception(regs))
  1599. return;
  1600. die_if_kernel("Unhandled kernel unaligned access", regs);
  1601. force_sig(SIGSEGV, current);
  1602. return;
  1603. sigbus:
  1604. die_if_kernel("Unhandled kernel unaligned access", regs);
  1605. force_sig(SIGBUS, current);
  1606. return;
  1607. sigill:
  1608. die_if_kernel
  1609. ("Unhandled kernel unaligned access or invalid instruction", regs);
  1610. force_sig(SIGILL, current);
  1611. }
static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user *addr)
{
	unsigned long value;
	unsigned int res;
	int reg;
	unsigned long orig31;
	u16 __user *pc16;
	unsigned long origpc;
	union mips16e_instruction mips16inst, oldinst;

	origpc = regs->cp0_epc;
	orig31 = regs->regs[31];
	pc16 = (unsigned short __user *)msk_isa16_mode(origpc);
	/*
	 * This load never faults.
	 */
	__get_user(mips16inst.full, pc16);
	oldinst = mips16inst;

	/* skip EXTEND instruction */
	if (mips16inst.ri.opcode == MIPS16e_extend_op) {
		pc16++;
		__get_user(mips16inst.full, pc16);
	} else if (delay_slot(regs)) {
		/* skip jump instructions */
		/* JAL/JALX are 32 bits but have OPCODE in first short int */
		if (mips16inst.ri.opcode == MIPS16e_jal_op)
			pc16++;
		pc16++;
		if (get_user(mips16inst.full, pc16))
			goto sigbus;
	}

	switch (mips16inst.ri.opcode) {
	case MIPS16e_i64_op:	/* I64 or RI64 instruction */
		switch (mips16inst.i64.func) {	/* I64/RI64 func field check */
		case MIPS16e_ldpc_func:
		case MIPS16e_ldsp_func:
			reg = reg16to32[mips16inst.ri64.ry];
			goto loadDW;

		case MIPS16e_sdsp_func:
			reg = reg16to32[mips16inst.ri64.ry];
			goto writeDW;

		case MIPS16e_sdrasp_func:
			reg = 29;	/* GPRSP */
			goto writeDW;
		}

		goto sigbus;

	case MIPS16e_swsp_op:
	case MIPS16e_lwpc_op:
	case MIPS16e_lwsp_op:
		reg = reg16to32[mips16inst.ri.rx];
		break;

	case MIPS16e_i8_op:
		if (mips16inst.i8.func != MIPS16e_swrasp_func)
			goto sigbus;
		reg = 29;	/* GPRSP */
		break;

	default:
		reg = reg16to32[mips16inst.rri.ry];
		break;
	}
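
	/*
	 * Editorial note: reg16to32[], defined earlier in this file, widens
	 * the 3-bit MIPS16e register fields to full 5-bit GPR numbers (in
	 * the usual MIPS16e convention registers 0-7 name $16, $17 and
	 * $2-$7), while the SP-relative forms above hardwire reg = 29,
	 * i.e. $sp.
	 */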
	switch (mips16inst.ri.opcode) {
	case MIPS16e_lb_op:
	case MIPS16e_lbu_op:
	case MIPS16e_sb_op:
		goto sigbus;

	case MIPS16e_lh_op:
		if (!access_ok(VERIFY_READ, addr, 2))
			goto sigbus;

		LoadHW(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;

	case MIPS16e_lhu_op:
		if (!access_ok(VERIFY_READ, addr, 2))
			goto sigbus;

		LoadHWU(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;

	case MIPS16e_lw_op:
	case MIPS16e_lwpc_op:
	case MIPS16e_lwsp_op:
		if (!access_ok(VERIFY_READ, addr, 4))
			goto sigbus;

		LoadW(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;

	case MIPS16e_lwu_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_READ, addr, 4))
			goto sigbus;

		LoadWU(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case MIPS16e_ld_op:
loadDW:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_READ, addr, 8))
			goto sigbus;

		LoadDW(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case MIPS16e_sh_op:
		if (!access_ok(VERIFY_WRITE, addr, 2))
			goto sigbus;

		MIPS16e_compute_return_epc(regs, &oldinst);
		value = regs->regs[reg];
		StoreHW(addr, value, res);
		if (res)
			goto fault;
		break;

	case MIPS16e_sw_op:
	case MIPS16e_swsp_op:
	case MIPS16e_i8_op:	/* actually - MIPS16e_swrasp_func */
		if (!access_ok(VERIFY_WRITE, addr, 4))
			goto sigbus;

		MIPS16e_compute_return_epc(regs, &oldinst);
		value = regs->regs[reg];
		StoreW(addr, value, res);
		if (res)
			goto fault;
		break;

	case MIPS16e_sd_op:
writeDW:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_WRITE, addr, 8))
			goto sigbus;

		MIPS16e_compute_return_epc(regs, &oldinst);
		value = regs->regs[reg];
		StoreDW(addr, value, res);
		if (res)
			goto fault;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	default:
		/*
		 * Pheeee...  We encountered a yet unknown instruction or
		 * cache coherence problem.  Die sucker, die ...
		 */
		goto sigill;
	}

#ifdef CONFIG_DEBUG_FS
	unaligned_instructions++;
#endif
	return;

fault:
	/* roll back jump/branch */
	regs->cp0_epc = origpc;
	regs->regs[31] = orig31;
	/* Did we have an exception handler installed? */
	if (fixup_exception(regs))
		return;

	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGSEGV, current);

	return;

sigbus:
	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGBUS, current);

	return;

sigill:
	die_if_kernel("Unhandled kernel unaligned access or invalid instruction", regs);
	force_sig(SIGILL, current);
}
asmlinkage void do_ade(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	unsigned int __user *pc;
	mm_segment_t seg;

	prev_state = exception_enter();
	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
			1, regs, regs->cp0_badvaddr);
	/*
	 * Did we catch a fault trying to load an instruction?
	 */
	if (regs->cp0_badvaddr == regs->cp0_epc)
		goto sigbus;

	if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
		goto sigbus;
	if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
		goto sigbus;

	/*
	 * Do branch emulation only if we didn't forward the exception.
	 * This is all so ugly ...
	 */

	/*
	 * Are we running in microMIPS mode?
	 */
	if (get_isa16_mode(regs->cp0_epc)) {
		/*
		 * Did we catch a fault trying to load an instruction in
		 * 16-bit mode?
		 */
		if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc))
			goto sigbus;
		if (unaligned_action == UNALIGNED_ACTION_SHOW)
			show_registers(regs);

		if (cpu_has_mmips) {
			seg = get_fs();
			if (!user_mode(regs))
				set_fs(KERNEL_DS);
			emulate_load_store_microMIPS(regs,
				(void __user *)regs->cp0_badvaddr);
			set_fs(seg);

			return;
		}

		if (cpu_has_mips16) {
			seg = get_fs();
			if (!user_mode(regs))
				set_fs(KERNEL_DS);
			emulate_load_store_MIPS16e(regs,
				(void __user *)regs->cp0_badvaddr);
			set_fs(seg);

			return;
		}

		goto sigbus;
	}

	if (unaligned_action == UNALIGNED_ACTION_SHOW)
		show_registers(regs);
	pc = (unsigned int __user *)exception_epc(regs);

	seg = get_fs();
	if (!user_mode(regs))
		set_fs(KERNEL_DS);
	emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc);
	set_fs(seg);

	return;

sigbus:
	die_if_kernel("Kernel unaligned instruction access", regs);
	force_sig(SIGBUS, current);

	/*
	 * XXX On return from the signal handler we should advance the epc
	 */
	exception_exit(prev_state);
}
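
/*
 * Editor's note: hypothetical userspace fragment, shown only to illustrate
 * the kind of access that raises the address error exception handled by
 * do_ade().  Whether the compiler emits a plain lw for it depends on
 * optimization and target options; a misaligned lw/sw is the canonical
 * trigger.  If the task allows fixups (see the TIF_FIXADE test above) and
 * unaligned_action is not UNALIGNED_ACTION_SIGNAL, the kernel emulates the
 * access transparently; otherwise the process gets SIGBUS.
 */
#if 0	/* userspace illustration, never compiled here */
unsigned int read_unaligned_word(const char *buf)
{
	/* buf + 1 is not 4-byte aligned; a direct lw from it traps */
	volatile const unsigned int *p = (const volatile unsigned int *)(buf + 1);

	return *p;
}
#endif
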
#ifdef CONFIG_DEBUG_FS

extern struct dentry *mips_debugfs_dir;

static int __init debugfs_unaligned(void)
{
	struct dentry *d;

	if (!mips_debugfs_dir)
		return -ENODEV;
	d = debugfs_create_u32("unaligned_instructions", S_IRUGO,
			       mips_debugfs_dir, &unaligned_instructions);
	if (!d)
		return -ENOMEM;
	d = debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
			       mips_debugfs_dir, &unaligned_action);
	if (!d)
		return -ENOMEM;
	return 0;
}
__initcall(debugfs_unaligned);
#endif
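
/*
 * Editor's usage note (illustrative; the exact path depends on where
 * debugfs is mounted and on how mips_debugfs_dir is created, typically a
 * "mips" directory under /sys/kernel/debug):
 *
 *	cat /sys/kernel/debug/mips/unaligned_instructions
 *	echo 1 > /sys/kernel/debug/mips/unaligned_action
 *
 * unaligned_instructions is a read-only count of emulated accesses since
 * boot; unaligned_action is writable by root and selects among the
 * UNALIGNED_ACTION_* values tested in do_ade() above.
 */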