ptrace.c 87 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
732783279328032813282328332843285328632873288328932903291329232933294329532963297329832993300330133023303330433053306330733083309331033113312331333143315331633173318331933203321
  1. /*
  2. * PowerPC version
  3. * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
  4. *
  5. * Derived from "arch/m68k/kernel/ptrace.c"
  6. * Copyright (C) 1994 by Hamish Macdonald
  7. * Taken from linux/kernel/ptrace.c and modified for M680x0.
  8. * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
  9. *
  10. * Modified by Cort Dougan (cort@hq.fsmlabs.com)
  11. * and Paul Mackerras (paulus@samba.org).
  12. *
  13. * This file is subject to the terms and conditions of the GNU General
  14. * Public License. See the file README.legal in the main directory of
  15. * this archive for more details.
  16. */
  17. #include <linux/kernel.h>
  18. #include <linux/sched.h>
  19. #include <linux/mm.h>
  20. #include <linux/smp.h>
  21. #include <linux/errno.h>
  22. #include <linux/ptrace.h>
  23. #include <linux/regset.h>
  24. #include <linux/tracehook.h>
  25. #include <linux/elf.h>
  26. #include <linux/user.h>
  27. #include <linux/security.h>
  28. #include <linux/signal.h>
  29. #include <linux/seccomp.h>
  30. #include <linux/audit.h>
  31. #include <trace/syscall.h>
  32. #include <linux/hw_breakpoint.h>
  33. #include <linux/perf_event.h>
  34. #include <linux/context_tracking.h>
  35. #include <linux/uaccess.h>
  36. #include <linux/pkeys.h>
  37. #include <asm/page.h>
  38. #include <asm/pgtable.h>
  39. #include <asm/switch_to.h>
  40. #include <asm/tm.h>
  41. #include <asm/asm-prototypes.h>
  42. #define CREATE_TRACE_POINTS
  43. #include <trace/events/syscalls.h>
/*
 * The parameter save area on the stack is used to store arguments being
 * passed to the callee function and is located at a fixed offset from the
 * stack pointer.
 */
#ifdef CONFIG_PPC32
#define PARAMETER_SAVE_AREA_OFFSET	24  /* bytes */
#else /* CONFIG_PPC32 */
#define PARAMETER_SAVE_AREA_OFFSET	48  /* bytes */
#endif

/* One entry mapping a register name to its byte offset within pt_regs. */
struct pt_regs_offset {
	const char *name;	/* register name; NULL terminates the table */
	int offset;		/* byte offset within struct pt_regs */
};

#define STR(s)	#s	/* convert to string */
#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
/* Each GPR is reachable under two names: "rN" and "gprN". */
#define GPR_OFFSET_NAME(num)	\
	{.name = STR(r##num), .offset = offsetof(struct pt_regs, gpr[num])}, \
	{.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])}
#define REG_OFFSET_END {.name = NULL, .offset = 0}

/* Shorthand offsets into the VR/FP/thread_struct state blocks. */
#define TVSO(f)	(offsetof(struct thread_vr_state, f))
#define TFSO(f)	(offsetof(struct thread_fp_state, f))
#define TSO(f)	(offsetof(struct thread_struct, f))
/*
 * Name -> pt_regs offset lookup table, terminated by REG_OFFSET_END.
 * Searched linearly by regs_query_register_offset()/_name() below.
 */
static const struct pt_regs_offset regoffset_table[] = {
	GPR_OFFSET_NAME(0),
	GPR_OFFSET_NAME(1),
	GPR_OFFSET_NAME(2),
	GPR_OFFSET_NAME(3),
	GPR_OFFSET_NAME(4),
	GPR_OFFSET_NAME(5),
	GPR_OFFSET_NAME(6),
	GPR_OFFSET_NAME(7),
	GPR_OFFSET_NAME(8),
	GPR_OFFSET_NAME(9),
	GPR_OFFSET_NAME(10),
	GPR_OFFSET_NAME(11),
	GPR_OFFSET_NAME(12),
	GPR_OFFSET_NAME(13),
	GPR_OFFSET_NAME(14),
	GPR_OFFSET_NAME(15),
	GPR_OFFSET_NAME(16),
	GPR_OFFSET_NAME(17),
	GPR_OFFSET_NAME(18),
	GPR_OFFSET_NAME(19),
	GPR_OFFSET_NAME(20),
	GPR_OFFSET_NAME(21),
	GPR_OFFSET_NAME(22),
	GPR_OFFSET_NAME(23),
	GPR_OFFSET_NAME(24),
	GPR_OFFSET_NAME(25),
	GPR_OFFSET_NAME(26),
	GPR_OFFSET_NAME(27),
	GPR_OFFSET_NAME(28),
	GPR_OFFSET_NAME(29),
	GPR_OFFSET_NAME(30),
	GPR_OFFSET_NAME(31),
	REG_OFFSET_NAME(nip),
	REG_OFFSET_NAME(msr),
	REG_OFFSET_NAME(ctr),
	REG_OFFSET_NAME(link),
	REG_OFFSET_NAME(xer),
	REG_OFFSET_NAME(ccr),
#ifdef CONFIG_PPC64
	REG_OFFSET_NAME(softe),
#else
	REG_OFFSET_NAME(mq),
#endif
	REG_OFFSET_NAME(trap),
	REG_OFFSET_NAME(dar),
	REG_OFFSET_NAME(dsisr),
	REG_OFFSET_END,
};
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Ensure the task's transactional-memory state is flushed to its
 * thread_struct before ptrace inspects it.
 *
 * If task is not current, it will have been flushed already to
 * its thread_struct during __switch_to().
 *
 * A reclaim flushes ALL the state, or, if not in a transaction, the TM
 * SPRs are saved in the appropriate thread structures from live values.
 */
static void flush_tmregs_to_thread(struct task_struct *tsk)
{
	if ((!cpu_has_feature(CPU_FTR_TM)) || (tsk != current))
		return;

	if (MSR_TM_SUSPENDED(mfmsr())) {
		/* Suspended transaction: reclaim the checkpointed state. */
		tm_reclaim_current(TM_CAUSE_SIGNAL);
	} else {
		/* Not transactional: just snapshot the TM SPRs. */
		tm_enable();
		tm_save_sprs(&(tsk->thread));
	}
}
#else
/* No TM support configured: nothing to flush. */
static inline void flush_tmregs_to_thread(struct task_struct *tsk) { }
#endif
  137. /**
  138. * regs_query_register_offset() - query register offset from its name
  139. * @name: the name of a register
  140. *
  141. * regs_query_register_offset() returns the offset of a register in struct
  142. * pt_regs from its name. If the name is invalid, this returns -EINVAL;
  143. */
  144. int regs_query_register_offset(const char *name)
  145. {
  146. const struct pt_regs_offset *roff;
  147. for (roff = regoffset_table; roff->name != NULL; roff++)
  148. if (!strcmp(roff->name, name))
  149. return roff->offset;
  150. return -EINVAL;
  151. }
  152. /**
  153. * regs_query_register_name() - query register name from its offset
  154. * @offset: the offset of a register in struct pt_regs.
  155. *
  156. * regs_query_register_name() returns the name of a register from its
  157. * offset in struct pt_regs. If the @offset is invalid, this returns NULL;
  158. */
  159. const char *regs_query_register_name(unsigned int offset)
  160. {
  161. const struct pt_regs_offset *roff;
  162. for (roff = regoffset_table; roff->name != NULL; roff++)
  163. if (roff->offset == offset)
  164. return roff->name;
  165. return NULL;
  166. }
/*
 * FIXME: does not yet catch signals sent when the child dies,
 * in exit.c or in signal.c.
 */

/*
 * Set of MSR bits that gdb can change on behalf of a process.
 * With dedicated debug registers (CONFIG_PPC_ADV_DEBUG_REGS) no MSR bits
 * are user-changeable; otherwise the MSR_SE and MSR_BE bits may be set.
 */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
#define MSR_DEBUGCHANGE	0
#else
#define MSR_DEBUGCHANGE	(MSR_SE | MSR_BE)
#endif

/*
 * Max register writeable via put_reg
 */
#ifdef CONFIG_PPC32
#define PT_MAX_PUT_REG	PT_MQ
#else
#define PT_MAX_PUT_REG	PT_CCR
#endif
/*
 * Compose the MSR value exported to userspace: the live MSR with the
 * task's floating-point exception mode bits OR'ed in.
 */
static unsigned long get_user_msr(struct task_struct *task)
{
	return task->thread.regs->msr | task->thread.fpexc_mode;
}
  191. static int set_user_msr(struct task_struct *task, unsigned long msr)
  192. {
  193. task->thread.regs->msr &= ~MSR_DEBUGCHANGE;
  194. task->thread.regs->msr |= msr & MSR_DEBUGCHANGE;
  195. return 0;
  196. }
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/* As get_user_msr(), but reading the checkpointed (pre-transaction) regs. */
static unsigned long get_user_ckpt_msr(struct task_struct *task)
{
	return task->thread.ckpt_regs.msr | task->thread.fpexc_mode;
}

/*
 * As set_user_msr(), but for the checkpointed regs: only the
 * MSR_DEBUGCHANGE bits may be modified from userspace.
 */
static int set_user_ckpt_msr(struct task_struct *task, unsigned long msr)
{
	task->thread.ckpt_regs.msr &= ~MSR_DEBUGCHANGE;
	task->thread.ckpt_regs.msr |= msr & MSR_DEBUGCHANGE;
	return 0;
}

/*
 * As set_user_trap(), but for the checkpointed regs: the low bits of
 * trap are reserved for kernel-internal use and are masked off.
 */
static int set_user_ckpt_trap(struct task_struct *task, unsigned long trap)
{
	task->thread.ckpt_regs.trap = trap & 0xfff0;
	return 0;
}
#endif
#ifdef CONFIG_PPC64
/* Read the task's saved DSCR value into *@data. */
static int get_user_dscr(struct task_struct *task, unsigned long *data)
{
	*data = task->thread.dscr;
	return 0;
}

/*
 * Set the task's DSCR and flag it as inherited so the explicitly-set
 * value sticks (see dscr_inherit handling elsewhere in the kernel).
 */
static int set_user_dscr(struct task_struct *task, unsigned long dscr)
{
	task->thread.dscr = dscr;
	task->thread.dscr_inherit = 1;
	return 0;
}
#else
/* DSCR is not available on 32-bit: reject all accesses. */
static int get_user_dscr(struct task_struct *task, unsigned long *data)
{
	return -EIO;
}

static int set_user_dscr(struct task_struct *task, unsigned long dscr)
{
	return -EIO;
}
#endif
/*
 * We prevent mucking around with the reserved area of trap
 * which are used internally by the kernel.
 */
static int set_user_trap(struct task_struct *task, unsigned long trap)
{
	/* Low 4 bits of trap are kernel-internal; mask them off. */
	task->thread.regs->trap = trap & 0xfff0;
	return 0;
}
/*
 * Get contents of register REGNO in task TASK.
 * Returns 0 on success, -EIO for an invalid regno or a task with no regs.
 */
int ptrace_get_reg(struct task_struct *task, int regno, unsigned long *data)
{
	if ((task->thread.regs == NULL) || !data)
		return -EIO;

	/* MSR is synthesized: it carries the fpexc mode bits too. */
	if (regno == PT_MSR) {
		*data = get_user_msr(task);
		return 0;
	}

	/* DSCR lives in thread_struct, not pt_regs. */
	if (regno == PT_DSCR)
		return get_user_dscr(task, data);

#ifdef CONFIG_PPC64
	/*
	 * softe copies paca->irq_soft_mask variable state.  Since
	 * irq_soft_mask is no longer used as a flag, force userspace to
	 * always see softe as 1, meaning interrupts are not soft-disabled.
	 */
	if (regno == PT_SOFTE) {
		*data = 1;
		return 0;
	}
#endif

	/* Everything else is read directly out of pt_regs by word index. */
	if (regno < (sizeof(struct pt_regs) / sizeof(unsigned long))) {
		*data = ((unsigned long *)task->thread.regs)[regno];
		return 0;
	}

	return -EIO;
}
  275. /*
  276. * Write contents of register REGNO in task TASK.
  277. */
  278. int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data)
  279. {
  280. if (task->thread.regs == NULL)
  281. return -EIO;
  282. if (regno == PT_MSR)
  283. return set_user_msr(task, data);
  284. if (regno == PT_TRAP)
  285. return set_user_trap(task, data);
  286. if (regno == PT_DSCR)
  287. return set_user_dscr(task, data);
  288. if (regno <= PT_MAX_PUT_REG) {
  289. ((unsigned long *)task->thread.regs)[regno] = data;
  290. return 0;
  291. }
  292. return -EIO;
  293. }
/*
 * Copy out the general purpose register set in struct pt_regs layout.
 * MSR is read via get_user_msr(); anything past pt_regs reads as zero.
 */
static int gpr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	int i, ret;

	if (target->thread.regs == NULL)
		return -EIO;

	if (!FULL_REGS(target->thread.regs)) {
		/* We have a partial register set.  Fill 14-31 with bogus values. */
		for (i = 14; i < 32; i++)
			target->thread.regs->gpr[i] = NV_REG_POISON;
	}

	/* Everything below msr is copied straight out of pt_regs. */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  target->thread.regs,
				  0, offsetof(struct pt_regs, msr));
	if (!ret) {
		/* msr is special: it includes the fpexc mode bits. */
		unsigned long msr = get_user_msr(target);
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
					  offsetof(struct pt_regs, msr),
					  offsetof(struct pt_regs, msr) +
					  sizeof(msr));
	}

	/* The copyout below relies on orig_gpr3 directly following msr. */
	BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
		     offsetof(struct pt_regs, msr) + sizeof(long));

	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &target->thread.regs->orig_gpr3,
					  offsetof(struct pt_regs, orig_gpr3),
					  sizeof(struct pt_regs));
	if (!ret)
		/* Pad the remainder of the regset view with zeroes. */
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       sizeof(struct pt_regs), -1);
	return ret;
}
/*
 * Write the general purpose register set from a regset image.
 * MSR and TRAP are filtered through set_user_msr()/set_user_trap() so
 * only user-modifiable bits change; words between PT_MAX_PUT_REG and
 * PT_TRAP, and everything past PT_TRAP, are silently ignored.
 */
static int gpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	unsigned long reg;
	int ret;

	if (target->thread.regs == NULL)
		return -EIO;

	CHECK_FULL_REGS(target->thread.regs);

	/* Everything below msr is written straight into pt_regs. */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 target->thread.regs,
				 0, PT_MSR * sizeof(reg));

	if (!ret && count > 0) {
		/* msr goes through set_user_msr() to filter writable bits. */
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
					 PT_MSR * sizeof(reg),
					 (PT_MSR + 1) * sizeof(reg));
		if (!ret)
			ret = set_user_msr(target, reg);
	}

	/* The copyin below relies on orig_gpr3 directly following msr. */
	BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
		     offsetof(struct pt_regs, msr) + sizeof(long));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.regs->orig_gpr3,
					 PT_ORIG_R3 * sizeof(reg),
					 (PT_MAX_PUT_REG + 1) * sizeof(reg));

	/* Words between PT_MAX_PUT_REG and PT_TRAP are not writable. */
	if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
		ret = user_regset_copyin_ignore(
			&pos, &count, &kbuf, &ubuf,
			(PT_MAX_PUT_REG + 1) * sizeof(reg),
			PT_TRAP * sizeof(reg));

	if (!ret && count > 0) {
		/* trap goes through set_user_trap() to mask reserved bits. */
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
					 PT_TRAP * sizeof(reg),
					 (PT_TRAP + 1) * sizeof(reg));
		if (!ret)
			ret = set_user_trap(target, reg);
	}

	if (!ret)
		/* Discard anything past trap. */
		ret = user_regset_copyin_ignore(
			&pos, &count, &kbuf, &ubuf,
			(PT_TRAP + 1) * sizeof(reg), -1);
	return ret;
}
/*
 * Regardless of transactions, 'fp_state' holds the current running
 * value of all FPR registers and 'ckfp_state' holds the last checkpointed
 * value of all FPR registers for the current transaction.
 *
 * Userspace interface buffer layout:
 *
 *	struct data {
 *		u64	fpr[32];
 *		u64	fpscr;
 *	};
 */
static int fpr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
#ifdef CONFIG_VSX
	u64 buf[33];
	int i;

	flush_fp_to_thread(target);

	/* copy to local buffer then write that out */
	/* With VSX configured the FPRs are gathered one at a time via
	 * TS_FPR() (the storage is presumably interleaved with VSX state —
	 * see the TS_FPR definition) rather than copied wholesale. */
	for (i = 0; i < 32; i++)
		buf[i] = target->thread.TS_FPR(i);
	buf[32] = target->thread.fp_state.fpscr;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
#else
	/* Without VSX, fp_state already matches the user layout exactly. */
	BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
		     offsetof(struct thread_fp_state, fpr[32]));

	flush_fp_to_thread(target);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &target->thread.fp_state, 0, -1);
#endif
}
/*
 * Regardless of transactions, 'fp_state' holds the current running
 * value of all FPR registers and 'ckfp_state' holds the last checkpointed
 * value of all FPR registers for the current transaction.
 *
 * Userspace interface buffer layout:
 *
 *	struct data {
 *		u64	fpr[32];
 *		u64	fpscr;
 *	};
 *
 */
static int fpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
#ifdef CONFIG_VSX
	u64 buf[33];
	int i;

	flush_fp_to_thread(target);

	/* Seed buf with the current values so a short/partial user write
	 * leaves the untouched registers unchanged. */
	for (i = 0; i < 32; i++)
		buf[i] = target->thread.TS_FPR(i);
	buf[32] = target->thread.fp_state.fpscr;

	/* copy to local buffer then write that out */
	i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
	if (i)
		return i;

	/* Scatter the merged image back into the thread's FP state. */
	for (i = 0; i < 32; i++)
		target->thread.TS_FPR(i) = buf[i];
	target->thread.fp_state.fpscr = buf[32];
	return 0;
#else
	/* Without VSX, fp_state already matches the user layout exactly. */
	BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
		     offsetof(struct thread_fp_state, fpr[32]));

	flush_fp_to_thread(target);

	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.fp_state, 0, -1);
#endif
}
#ifdef CONFIG_ALTIVEC
/*
 * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
 * The transfer totals 34 quadwords.  Quadwords 0-31 contain the
 * corresponding vector registers.  Quadword 32 contains the vscr as the
 * last word (offset 12) within that quadword.  Quadword 33 contains the
 * vrsave as the first word (offset 0) within the quadword.
 *
 * This definition of the VMX state is compatible with the current PPC32
 * ptrace interface.  This allows signal handling and ptrace to use the
 * same structures.  This also simplifies the implementation of a
 * bi-arch (combined 32- and 64-bit) gdb.
 */

/* Report the VMX regset as populated only if the task has used VMX. */
static int vr_active(struct task_struct *target,
		     const struct user_regset *regset)
{
	flush_altivec_to_thread(target);
	return target->thread.used_vr ? regset->n : 0;
}
/*
 * Regardless of transactions, 'vr_state' holds the current running
 * value of all the VMX registers and 'ckvr_state' holds the last
 * checkpointed value of all the VMX registers for the current
 * transaction to fall back on in case it aborts.
 *
 * Userspace interface buffer layout:
 *
 *	struct data {
 *		vector128	vr[32];
 *		vector128	vscr;
 *		vector128	vrsave;
 *	};
 */
static int vr_get(struct task_struct *target, const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  void *kbuf, void __user *ubuf)
{
	int ret;

	flush_altivec_to_thread(target);

	/* The single copyout of vr[0..31] + vscr relies on vscr directly
	 * following vr[32] in thread_vr_state. */
	BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
		     offsetof(struct thread_vr_state, vr[32]));

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &target->thread.vr_state, 0,
				  33 * sizeof(vector128));
	if (!ret) {
		/*
		 * Copy out only the low-order word of vrsave.
		 */
		union {
			elf_vrreg_t reg;
			u32 word;
		} vrsave;
		/* Zero the quadword so the unused bytes read as zero. */
		memset(&vrsave, 0, sizeof(vrsave));
		vrsave.word = target->thread.vrsave;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
					  33 * sizeof(vector128), -1);
	}
	return ret;
}
/*
 * Regardless of transactions, 'vr_state' holds the current running
 * value of all the VMX registers and 'ckvr_state' holds the last
 * checkpointed value of all the VMX registers for the current
 * transaction to fall back on in case it aborts.
 *
 * Userspace interface buffer layout:
 *
 *	struct data {
 *		vector128	vr[32];
 *		vector128	vscr;
 *		vector128	vrsave;
 *	};
 */
static int vr_set(struct task_struct *target, const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  const void *kbuf, const void __user *ubuf)
{
	int ret;

	flush_altivec_to_thread(target);

	/* The single copyin of vr[0..31] + vscr relies on vscr directly
	 * following vr[32] in thread_vr_state. */
	BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
		     offsetof(struct thread_vr_state, vr[32]));

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.vr_state, 0,
				 33 * sizeof(vector128));
	if (!ret && count > 0) {
		/*
		 * We use only the first word of vrsave.
		 */
		union {
			elf_vrreg_t reg;
			u32 word;
		} vrsave;
		/* Seed with the current value so a partial write of the
		 * vrsave quadword leaves it unchanged. */
		memset(&vrsave, 0, sizeof(vrsave));
		vrsave.word = target->thread.vrsave;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
					 33 * sizeof(vector128), -1);
		if (!ret)
			target->thread.vrsave = vrsave.word;
	}
	return ret;
}
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
/*
 * Currently to set and get all the vsx state, you need to call
 * the fp and VMX calls as well.  This only get/sets the lower 32
 * 128bit VSX registers.
 */

/* Report the VSX regset as populated only if the task has used VSX. */
static int vsr_active(struct task_struct *target,
		      const struct user_regset *regset)
{
	flush_vsx_to_thread(target);
	return target->thread.used_vsr ? regset->n : 0;
}
  559. /*
  560. * Regardless of transactions, 'fp_state' holds the current running
  561. * value of all FPR registers and 'ckfp_state' holds the last
  562. * checkpointed value of all FPR registers for the current
  563. * transaction.
  564. *
  565. * Userspace interface buffer layout:
  566. *
  567. * struct data {
  568. * u64 vsx[32];
  569. * };
  570. */
  571. static int vsr_get(struct task_struct *target, const struct user_regset *regset,
  572. unsigned int pos, unsigned int count,
  573. void *kbuf, void __user *ubuf)
  574. {
  575. u64 buf[32];
  576. int ret, i;
  577. flush_tmregs_to_thread(target);
  578. flush_fp_to_thread(target);
  579. flush_altivec_to_thread(target);
  580. flush_vsx_to_thread(target);
  581. for (i = 0; i < 32 ; i++)
  582. buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
  583. ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
  584. buf, 0, 32 * sizeof(double));
  585. return ret;
  586. }
  587. /*
  588. * Regardless of transactions, 'fp_state' holds the current running
  589. * value of all FPR registers and 'ckfp_state' holds the last
  590. * checkpointed value of all FPR registers for the current
  591. * transaction.
  592. *
  593. * Userspace interface buffer layout:
  594. *
  595. * struct data {
  596. * u64 vsx[32];
  597. * };
  598. */
  599. static int vsr_set(struct task_struct *target, const struct user_regset *regset,
  600. unsigned int pos, unsigned int count,
  601. const void *kbuf, const void __user *ubuf)
  602. {
  603. u64 buf[32];
  604. int ret,i;
  605. flush_tmregs_to_thread(target);
  606. flush_fp_to_thread(target);
  607. flush_altivec_to_thread(target);
  608. flush_vsx_to_thread(target);
  609. for (i = 0; i < 32 ; i++)
  610. buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
  611. ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
  612. buf, 0, 32 * sizeof(double));
  613. if (!ret)
  614. for (i = 0; i < 32 ; i++)
  615. target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
  616. return ret;
  617. }
  618. #endif /* CONFIG_VSX */
  619. #ifdef CONFIG_SPE
  620. /*
  621. * For get_evrregs/set_evrregs functions 'data' has the following layout:
  622. *
  623. * struct {
  624. * u32 evr[32];
  625. * u64 acc;
  626. * u32 spefscr;
  627. * }
  628. */
  629. static int evr_active(struct task_struct *target,
  630. const struct user_regset *regset)
  631. {
  632. flush_spe_to_thread(target);
  633. return target->thread.used_spe ? regset->n : 0;
  634. }
/* Copy out the SPE state: evr[32], then acc (u64) and spefscr. */
static int evr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	int ret;

	flush_spe_to_thread(target);

	/* The evr[] array first... */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &target->thread.evr,
				  0, sizeof(target->thread.evr));

	/* ...then acc and spefscr, which must be adjacent in thread_struct. */
	BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
		     offsetof(struct thread_struct, spefscr));

	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &target->thread.acc,
					  sizeof(target->thread.evr), -1);

	return ret;
}
/* Write the SPE state: evr[32], then acc (u64) and spefscr. */
static int evr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;

	flush_spe_to_thread(target);

	/* The evr[] array first... */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.evr,
				 0, sizeof(target->thread.evr));

	/* ...then acc and spefscr, which must be adjacent in thread_struct. */
	BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
		     offsetof(struct thread_struct, spefscr));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.acc,
					 sizeof(target->thread.evr), -1);

	return ret;
}
  669. #endif /* CONFIG_SPE */
  670. #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
  671. /**
  672. * tm_cgpr_active - get active number of registers in CGPR
  673. * @target: The target task.
  674. * @regset: The user regset structure.
  675. *
  676. * This function checks for the active number of available
  677. * registers in transaction checkpointed GPR category.
  678. */
  679. static int tm_cgpr_active(struct task_struct *target,
  680. const struct user_regset *regset)
  681. {
  682. if (!cpu_has_feature(CPU_FTR_TM))
  683. return -ENODEV;
  684. if (!MSR_TM_ACTIVE(target->thread.regs->msr))
  685. return 0;
  686. return regset->n;
  687. }
  688. /**
  689. * tm_cgpr_get - get CGPR registers
  690. * @target: The target task.
  691. * @regset: The user regset structure.
  692. * @pos: The buffer position.
  693. * @count: Number of bytes to copy.
  694. * @kbuf: Kernel buffer to copy from.
  695. * @ubuf: User buffer to copy into.
  696. *
  697. * This function gets transaction checkpointed GPR registers.
  698. *
  699. * When the transaction is active, 'ckpt_regs' holds all the checkpointed
  700. * GPR register values for the current transaction to fall back on if it
  701. * aborts in between. This function gets those checkpointed GPR registers.
  702. * The userspace interface buffer layout is as follows.
  703. *
  704. * struct data {
  705. * struct pt_regs ckpt_regs;
  706. * };
  707. */
static int tm_cgpr_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	int ret;

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
		return -ENODATA;

	flush_tmregs_to_thread(target);
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);

	/* Everything up to (but not including) the msr slot. */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &target->thread.ckpt_regs,
				  0, offsetof(struct pt_regs, msr));
	if (!ret) {
		/* The msr slot gets the filtered checkpointed MSR value. */
		unsigned long msr = get_user_ckpt_msr(target);

		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
					  offsetof(struct pt_regs, msr),
					  offsetof(struct pt_regs, msr) +
					  sizeof(msr));
	}

	/* orig_gpr3 must follow msr directly for the tail copy below. */
	BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
		     offsetof(struct pt_regs, msr) + sizeof(long));

	/* The rest of ckpt_regs, from orig_gpr3 to the end. */
	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &target->thread.ckpt_regs.orig_gpr3,
					  offsetof(struct pt_regs, orig_gpr3),
					  sizeof(struct pt_regs));

	/* Zero-fill any remainder of the user buffer. */
	if (!ret)
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       sizeof(struct pt_regs), -1);

	return ret;
}
  743. /*
  744. * tm_cgpr_set - set the CGPR registers
  745. * @target: The target task.
  746. * @regset: The user regset structure.
  747. * @pos: The buffer position.
  748. * @count: Number of bytes to copy.
  749. * @kbuf: Kernel buffer to copy into.
  750. * @ubuf: User buffer to copy from.
  751. *
  752. * This function sets in transaction checkpointed GPR registers.
  753. *
  754. * When the transaction is active, 'ckpt_regs' holds the checkpointed
  755. * GPR register values for the current transaction to fall back on if it
  756. * aborts in between. This function sets those checkpointed GPR registers.
  757. * The userspace interface buffer layout is as follows.
  758. *
  759. * struct data {
  760. * struct pt_regs ckpt_regs;
  761. * };
  762. */
static int tm_cgpr_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	unsigned long reg;
	int ret;

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
		return -ENODATA;

	flush_tmregs_to_thread(target);
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);

	/* GPRs and the slots before msr are written straight in. */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.ckpt_regs,
				 0, PT_MSR * sizeof(reg));

	/* msr goes through the filtered setter, never written directly. */
	if (!ret && count > 0) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
					 PT_MSR * sizeof(reg),
					 (PT_MSR + 1) * sizeof(reg));
		if (!ret)
			ret = set_user_ckpt_msr(target, reg);
	}

	/* orig_gpr3 must follow msr directly for the copy below. */
	BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
		     offsetof(struct pt_regs, msr) + sizeof(long));

	/* Writable slots from orig_gpr3 up to the last settable register. */
	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.ckpt_regs.orig_gpr3,
					 PT_ORIG_R3 * sizeof(reg),
					 (PT_MAX_PUT_REG + 1) * sizeof(reg));

	/* Skip any read-only slots between PT_MAX_PUT_REG and PT_TRAP. */
	if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
		ret = user_regset_copyin_ignore(
			&pos, &count, &kbuf, &ubuf,
			(PT_MAX_PUT_REG + 1) * sizeof(reg),
			PT_TRAP * sizeof(reg));

	/* trap also goes through a filtered setter. */
	if (!ret && count > 0) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
					 PT_TRAP * sizeof(reg),
					 (PT_TRAP + 1) * sizeof(reg));
		if (!ret)
			ret = set_user_ckpt_trap(target, reg);
	}

	/* Ignore anything after the trap slot. */
	if (!ret)
		ret = user_regset_copyin_ignore(
			&pos, &count, &kbuf, &ubuf,
			(PT_TRAP + 1) * sizeof(reg), -1);

	return ret;
}
  812. /**
  813. * tm_cfpr_active - get active number of registers in CFPR
  814. * @target: The target task.
  815. * @regset: The user regset structure.
  816. *
  817. * This function checks for the active number of available
  818. * registers in transaction checkpointed FPR category.
  819. */
  820. static int tm_cfpr_active(struct task_struct *target,
  821. const struct user_regset *regset)
  822. {
  823. if (!cpu_has_feature(CPU_FTR_TM))
  824. return -ENODEV;
  825. if (!MSR_TM_ACTIVE(target->thread.regs->msr))
  826. return 0;
  827. return regset->n;
  828. }
  829. /**
  830. * tm_cfpr_get - get CFPR registers
  831. * @target: The target task.
  832. * @regset: The user regset structure.
  833. * @pos: The buffer position.
  834. * @count: Number of bytes to copy.
  835. * @kbuf: Kernel buffer to copy from.
  836. * @ubuf: User buffer to copy into.
  837. *
  838. * This function gets in transaction checkpointed FPR registers.
  839. *
  840. * When the transaction is active 'ckfp_state' holds the checkpointed
  841. * values for the current transaction to fall back on if it aborts
  842. * in between. This function gets those checkpointed FPR registers.
  843. * The userspace interface buffer layout is as follows.
  844. *
  845. * struct data {
  846. * u64 fpr[32];
  847. * u64 fpscr;
  848. *};
  849. */
static int tm_cfpr_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	u64 buf[33];	/* fpr[0..31] followed by fpscr */
	int i;

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
		return -ENODATA;

	flush_tmregs_to_thread(target);
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);

	/* copy to local buffer then write that out */
	for (i = 0; i < 32 ; i++)
		buf[i] = target->thread.TS_CKFPR(i);
	buf[32] = target->thread.ckfp_state.fpscr;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
}
  870. /**
  871. * tm_cfpr_set - set CFPR registers
  872. * @target: The target task.
  873. * @regset: The user regset structure.
  874. * @pos: The buffer position.
  875. * @count: Number of bytes to copy.
  876. * @kbuf: Kernel buffer to copy into.
  877. * @ubuf: User buffer to copy from.
  878. *
  879. * This function sets in transaction checkpointed FPR registers.
  880. *
  881. * When the transaction is active 'ckfp_state' holds the checkpointed
  882. * FPR register values for the current transaction to fall back on
  883. * if it aborts in between. This function sets these checkpointed
  884. * FPR registers. The userspace interface buffer layout is as follows.
  885. *
  886. * struct data {
  887. * u64 fpr[32];
  888. * u64 fpscr;
  889. *};
  890. */
static int tm_cfpr_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	u64 buf[33];	/* fpr[0..31] followed by fpscr */
	int i;

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
		return -ENODATA;

	flush_tmregs_to_thread(target);
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);

	/* Seed with current values so a partial write leaves the rest intact. */
	for (i = 0; i < 32; i++)
		buf[i] = target->thread.TS_CKFPR(i);
	buf[32] = target->thread.ckfp_state.fpscr;

	/* copy to local buffer then write that out */
	i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
	if (i)
		return i;

	for (i = 0; i < 32 ; i++)
		target->thread.TS_CKFPR(i) = buf[i];
	target->thread.ckfp_state.fpscr = buf[32];
	return 0;
}
  917. /**
  918. * tm_cvmx_active - get active number of registers in CVMX
  919. * @target: The target task.
  920. * @regset: The user regset structure.
  921. *
  922. * This function checks for the active number of available
  923. * registers in checkpointed VMX category.
  924. */
  925. static int tm_cvmx_active(struct task_struct *target,
  926. const struct user_regset *regset)
  927. {
  928. if (!cpu_has_feature(CPU_FTR_TM))
  929. return -ENODEV;
  930. if (!MSR_TM_ACTIVE(target->thread.regs->msr))
  931. return 0;
  932. return regset->n;
  933. }
  934. /**
  935. * tm_cvmx_get - get CVMX registers
  936. * @target: The target task.
  937. * @regset: The user regset structure.
  938. * @pos: The buffer position.
  939. * @count: Number of bytes to copy.
  940. * @kbuf: Kernel buffer to copy from.
  941. * @ubuf: User buffer to copy into.
  942. *
  943. * This function gets in transaction checkpointed VMX registers.
  944. *
  945. * When the transaction is active 'ckvr_state' and 'ckvrsave' hold
  946. * the checkpointed values for the current transaction to fall
  947. * back on if it aborts in between. The userspace interface buffer
  948. * layout is as follows.
  949. *
  950. * struct data {
  951. * vector128 vr[32];
  952. * vector128 vscr;
  953. * vector128 vrsave;
  954. *};
  955. */
static int tm_cvmx_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	int ret;

	/* vscr must follow vr[31] directly so the 33-slot bulk copy works. */
	BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
		return -ENODATA;

	/* Flush the state */
	flush_tmregs_to_thread(target);
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);

	/* ckvr[0..31] and vscr in one contiguous copy. */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &target->thread.ckvr_state, 0,
				  33 * sizeof(vector128));
	if (!ret) {
		/*
		 * Copy out only the low-order word of vrsave.
		 */
		union {
			elf_vrreg_t reg;
			u32 word;
		} vrsave;

		memset(&vrsave, 0, sizeof(vrsave));
		vrsave.word = target->thread.ckvrsave;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
					  33 * sizeof(vector128), -1);
	}

	return ret;
}
  989. /**
  990. * tm_cvmx_set - set CVMX registers
  991. * @target: The target task.
  992. * @regset: The user regset structure.
  993. * @pos: The buffer position.
  994. * @count: Number of bytes to copy.
  995. * @kbuf: Kernel buffer to copy into.
  996. * @ubuf: User buffer to copy from.
  997. *
  998. * This function sets in transaction checkpointed VMX registers.
  999. *
  1000. * When the transaction is active 'ckvr_state' and 'ckvrsave' hold
  1001. * the checkpointed values for the current transaction to fall
  1002. * back on if it aborts in between. The userspace interface buffer
  1003. * layout is as follows.
  1004. *
  1005. * struct data {
  1006. * vector128 vr[32];
  1007. * vector128 vscr;
  1008. * vector128 vrsave;
  1009. *};
  1010. */
static int tm_cvmx_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	int ret;

	/* vscr must follow vr[31] directly so the 33-slot bulk copy works. */
	BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
		return -ENODATA;

	flush_tmregs_to_thread(target);
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);

	/* ckvr[0..31] and vscr in one contiguous copy. */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.ckvr_state, 0,
				 33 * sizeof(vector128));
	if (!ret && count > 0) {
		/*
		 * We use only the low-order word of vrsave.
		 */
		union {
			elf_vrreg_t reg;
			u32 word;
		} vrsave;

		memset(&vrsave, 0, sizeof(vrsave));
		vrsave.word = target->thread.ckvrsave;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
					 33 * sizeof(vector128), -1);
		if (!ret)
			target->thread.ckvrsave = vrsave.word;
	}

	return ret;
}
  1045. /**
  1046. * tm_cvsx_active - get active number of registers in CVSX
  1047. * @target: The target task.
  1048. * @regset: The user regset structure.
  1049. *
  1050. * This function checks for the active number of available
  1051. * registers in transaction checkpointed VSX category.
  1052. */
  1053. static int tm_cvsx_active(struct task_struct *target,
  1054. const struct user_regset *regset)
  1055. {
  1056. if (!cpu_has_feature(CPU_FTR_TM))
  1057. return -ENODEV;
  1058. if (!MSR_TM_ACTIVE(target->thread.regs->msr))
  1059. return 0;
  1060. flush_vsx_to_thread(target);
  1061. return target->thread.used_vsr ? regset->n : 0;
  1062. }
  1063. /**
  1064. * tm_cvsx_get - get CVSX registers
  1065. * @target: The target task.
  1066. * @regset: The user regset structure.
  1067. * @pos: The buffer position.
  1068. * @count: Number of bytes to copy.
  1069. * @kbuf: Kernel buffer to copy from.
  1070. * @ubuf: User buffer to copy into.
  1071. *
  1072. * This function gets in transaction checkpointed VSX registers.
  1073. *
  1074. * When the transaction is active 'ckfp_state' holds the checkpointed
  1075. * values for the current transaction to fall back on if it aborts
  1076. * in between. This function gets those checkpointed VSX registers.
  1077. * The userspace interface buffer layout is as follows.
  1078. *
  1079. * struct data {
  1080. * u64 vsx[32];
  1081. *};
  1082. */
  1083. static int tm_cvsx_get(struct task_struct *target,
  1084. const struct user_regset *regset,
  1085. unsigned int pos, unsigned int count,
  1086. void *kbuf, void __user *ubuf)
  1087. {
  1088. u64 buf[32];
  1089. int ret, i;
  1090. if (!cpu_has_feature(CPU_FTR_TM))
  1091. return -ENODEV;
  1092. if (!MSR_TM_ACTIVE(target->thread.regs->msr))
  1093. return -ENODATA;
  1094. /* Flush the state */
  1095. flush_tmregs_to_thread(target);
  1096. flush_fp_to_thread(target);
  1097. flush_altivec_to_thread(target);
  1098. flush_vsx_to_thread(target);
  1099. for (i = 0; i < 32 ; i++)
  1100. buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
  1101. ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
  1102. buf, 0, 32 * sizeof(double));
  1103. return ret;
  1104. }
  1105. /**
  1106. * tm_cvsx_set - set CVSX registers
  1107. * @target: The target task.
  1108. * @regset: The user regset structure.
  1109. * @pos: The buffer position.
  1110. * @count: Number of bytes to copy.
  1111. * @kbuf: Kernel buffer to copy into.
  1112. * @ubuf: User buffer to copy from.
  1113. *
  1114. * This function sets in transaction checkpointed VSX registers.
  1115. *
  1116. * When the transaction is active 'ckfp_state' holds the checkpointed
  1117. * VSX register values for the current transaction to fall back on
  1118. * if it aborts in between. This function sets these checkpointed
  1119. * VSX registers. The userspace interface buffer layout is as follows.
  1120. *
  1121. * struct data {
  1122. * u64 vsx[32];
  1123. *};
  1124. */
  1125. static int tm_cvsx_set(struct task_struct *target,
  1126. const struct user_regset *regset,
  1127. unsigned int pos, unsigned int count,
  1128. const void *kbuf, const void __user *ubuf)
  1129. {
  1130. u64 buf[32];
  1131. int ret, i;
  1132. if (!cpu_has_feature(CPU_FTR_TM))
  1133. return -ENODEV;
  1134. if (!MSR_TM_ACTIVE(target->thread.regs->msr))
  1135. return -ENODATA;
  1136. /* Flush the state */
  1137. flush_tmregs_to_thread(target);
  1138. flush_fp_to_thread(target);
  1139. flush_altivec_to_thread(target);
  1140. flush_vsx_to_thread(target);
  1141. for (i = 0; i < 32 ; i++)
  1142. buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
  1143. ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
  1144. buf, 0, 32 * sizeof(double));
  1145. if (!ret)
  1146. for (i = 0; i < 32 ; i++)
  1147. target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
  1148. return ret;
  1149. }
  1150. /**
  1151. * tm_spr_active - get active number of registers in TM SPR
  1152. * @target: The target task.
  1153. * @regset: The user regset structure.
  1154. *
  1155. * This function checks the active number of available
  1156. * registers in the transactional memory SPR category.
  1157. */
  1158. static int tm_spr_active(struct task_struct *target,
  1159. const struct user_regset *regset)
  1160. {
  1161. if (!cpu_has_feature(CPU_FTR_TM))
  1162. return -ENODEV;
  1163. return regset->n;
  1164. }
  1165. /**
  1166. * tm_spr_get - get the TM related SPR registers
  1167. * @target: The target task.
  1168. * @regset: The user regset structure.
  1169. * @pos: The buffer position.
  1170. * @count: Number of bytes to copy.
  1171. * @kbuf: Kernel buffer to copy from.
  1172. * @ubuf: User buffer to copy into.
  1173. *
  1174. * This function gets transactional memory related SPR registers.
  1175. * The userspace interface buffer layout is as follows.
  1176. *
  1177. * struct {
  1178. * u64 tm_tfhar;
  1179. * u64 tm_texasr;
  1180. * u64 tm_tfiar;
  1181. * };
  1182. */
static int tm_spr_get(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      void *kbuf, void __user *ubuf)
{
	int ret;

	/* Build tests: tfhar, texasr, tfiar must be contiguous u64 fields. */
	BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
	BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
	BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	/* Flush the states */
	flush_tmregs_to_thread(target);
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);

	/* TFHAR register */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &target->thread.tm_tfhar, 0, sizeof(u64));

	/* TEXASR register */
	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &target->thread.tm_texasr, sizeof(u64),
					  2 * sizeof(u64));

	/* TFIAR register */
	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &target->thread.tm_tfiar,
					  2 * sizeof(u64), 3 * sizeof(u64));
	return ret;
}
  1214. /**
  1215. * tm_spr_set - set the TM related SPR registers
  1216. * @target: The target task.
  1217. * @regset: The user regset structure.
  1218. * @pos: The buffer position.
  1219. * @count: Number of bytes to copy.
  1220. * @kbuf: Kernel buffer to copy into.
  1221. * @ubuf: User buffer to copy from.
  1222. *
  1223. * This function sets transactional memory related SPR registers.
  1224. * The userspace interface buffer layout is as follows.
  1225. *
  1226. * struct {
  1227. * u64 tm_tfhar;
  1228. * u64 tm_texasr;
  1229. * u64 tm_tfiar;
  1230. * };
  1231. */
static int tm_spr_set(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      const void *kbuf, const void __user *ubuf)
{
	int ret;

	/* Build tests: tfhar, texasr, tfiar must be contiguous u64 fields. */
	BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
	BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
	BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	/* Flush the states */
	flush_tmregs_to_thread(target);
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);

	/* TFHAR register */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.tm_tfhar, 0, sizeof(u64));

	/* TEXASR register */
	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.tm_texasr, sizeof(u64),
					 2 * sizeof(u64));

	/* TFIAR register */
	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.tm_tfiar,
					 2 * sizeof(u64), 3 * sizeof(u64));
	return ret;
}
  1263. static int tm_tar_active(struct task_struct *target,
  1264. const struct user_regset *regset)
  1265. {
  1266. if (!cpu_has_feature(CPU_FTR_TM))
  1267. return -ENODEV;
  1268. if (MSR_TM_ACTIVE(target->thread.regs->msr))
  1269. return regset->n;
  1270. return 0;
  1271. }
  1272. static int tm_tar_get(struct task_struct *target,
  1273. const struct user_regset *regset,
  1274. unsigned int pos, unsigned int count,
  1275. void *kbuf, void __user *ubuf)
  1276. {
  1277. int ret;
  1278. if (!cpu_has_feature(CPU_FTR_TM))
  1279. return -ENODEV;
  1280. if (!MSR_TM_ACTIVE(target->thread.regs->msr))
  1281. return -ENODATA;
  1282. ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
  1283. &target->thread.tm_tar, 0, sizeof(u64));
  1284. return ret;
  1285. }
  1286. static int tm_tar_set(struct task_struct *target,
  1287. const struct user_regset *regset,
  1288. unsigned int pos, unsigned int count,
  1289. const void *kbuf, const void __user *ubuf)
  1290. {
  1291. int ret;
  1292. if (!cpu_has_feature(CPU_FTR_TM))
  1293. return -ENODEV;
  1294. if (!MSR_TM_ACTIVE(target->thread.regs->msr))
  1295. return -ENODATA;
  1296. ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
  1297. &target->thread.tm_tar, 0, sizeof(u64));
  1298. return ret;
  1299. }
  1300. static int tm_ppr_active(struct task_struct *target,
  1301. const struct user_regset *regset)
  1302. {
  1303. if (!cpu_has_feature(CPU_FTR_TM))
  1304. return -ENODEV;
  1305. if (MSR_TM_ACTIVE(target->thread.regs->msr))
  1306. return regset->n;
  1307. return 0;
  1308. }
  1309. static int tm_ppr_get(struct task_struct *target,
  1310. const struct user_regset *regset,
  1311. unsigned int pos, unsigned int count,
  1312. void *kbuf, void __user *ubuf)
  1313. {
  1314. int ret;
  1315. if (!cpu_has_feature(CPU_FTR_TM))
  1316. return -ENODEV;
  1317. if (!MSR_TM_ACTIVE(target->thread.regs->msr))
  1318. return -ENODATA;
  1319. ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
  1320. &target->thread.tm_ppr, 0, sizeof(u64));
  1321. return ret;
  1322. }
  1323. static int tm_ppr_set(struct task_struct *target,
  1324. const struct user_regset *regset,
  1325. unsigned int pos, unsigned int count,
  1326. const void *kbuf, const void __user *ubuf)
  1327. {
  1328. int ret;
  1329. if (!cpu_has_feature(CPU_FTR_TM))
  1330. return -ENODEV;
  1331. if (!MSR_TM_ACTIVE(target->thread.regs->msr))
  1332. return -ENODATA;
  1333. ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
  1334. &target->thread.tm_ppr, 0, sizeof(u64));
  1335. return ret;
  1336. }
  1337. static int tm_dscr_active(struct task_struct *target,
  1338. const struct user_regset *regset)
  1339. {
  1340. if (!cpu_has_feature(CPU_FTR_TM))
  1341. return -ENODEV;
  1342. if (MSR_TM_ACTIVE(target->thread.regs->msr))
  1343. return regset->n;
  1344. return 0;
  1345. }
  1346. static int tm_dscr_get(struct task_struct *target,
  1347. const struct user_regset *regset,
  1348. unsigned int pos, unsigned int count,
  1349. void *kbuf, void __user *ubuf)
  1350. {
  1351. int ret;
  1352. if (!cpu_has_feature(CPU_FTR_TM))
  1353. return -ENODEV;
  1354. if (!MSR_TM_ACTIVE(target->thread.regs->msr))
  1355. return -ENODATA;
  1356. ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
  1357. &target->thread.tm_dscr, 0, sizeof(u64));
  1358. return ret;
  1359. }
  1360. static int tm_dscr_set(struct task_struct *target,
  1361. const struct user_regset *regset,
  1362. unsigned int pos, unsigned int count,
  1363. const void *kbuf, const void __user *ubuf)
  1364. {
  1365. int ret;
  1366. if (!cpu_has_feature(CPU_FTR_TM))
  1367. return -ENODEV;
  1368. if (!MSR_TM_ACTIVE(target->thread.regs->msr))
  1369. return -ENODATA;
  1370. ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
  1371. &target->thread.tm_dscr, 0, sizeof(u64));
  1372. return ret;
  1373. }
  1374. #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
  1375. #ifdef CONFIG_PPC64
  1376. static int ppr_get(struct task_struct *target,
  1377. const struct user_regset *regset,
  1378. unsigned int pos, unsigned int count,
  1379. void *kbuf, void __user *ubuf)
  1380. {
  1381. return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
  1382. &target->thread.ppr, 0, sizeof(u64));
  1383. }
  1384. static int ppr_set(struct task_struct *target,
  1385. const struct user_regset *regset,
  1386. unsigned int pos, unsigned int count,
  1387. const void *kbuf, const void __user *ubuf)
  1388. {
  1389. return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
  1390. &target->thread.ppr, 0, sizeof(u64));
  1391. }
  1392. static int dscr_get(struct task_struct *target,
  1393. const struct user_regset *regset,
  1394. unsigned int pos, unsigned int count,
  1395. void *kbuf, void __user *ubuf)
  1396. {
  1397. return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
  1398. &target->thread.dscr, 0, sizeof(u64));
  1399. }
  1400. static int dscr_set(struct task_struct *target,
  1401. const struct user_regset *regset,
  1402. unsigned int pos, unsigned int count,
  1403. const void *kbuf, const void __user *ubuf)
  1404. {
  1405. return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
  1406. &target->thread.dscr, 0, sizeof(u64));
  1407. }
  1408. #endif
  1409. #ifdef CONFIG_PPC_BOOK3S_64
  1410. static int tar_get(struct task_struct *target,
  1411. const struct user_regset *regset,
  1412. unsigned int pos, unsigned int count,
  1413. void *kbuf, void __user *ubuf)
  1414. {
  1415. return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
  1416. &target->thread.tar, 0, sizeof(u64));
  1417. }
  1418. static int tar_set(struct task_struct *target,
  1419. const struct user_regset *regset,
  1420. unsigned int pos, unsigned int count,
  1421. const void *kbuf, const void __user *ubuf)
  1422. {
  1423. return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
  1424. &target->thread.tar, 0, sizeof(u64));
  1425. }
  1426. static int ebb_active(struct task_struct *target,
  1427. const struct user_regset *regset)
  1428. {
  1429. if (!cpu_has_feature(CPU_FTR_ARCH_207S))
  1430. return -ENODEV;
  1431. if (target->thread.used_ebb)
  1432. return regset->n;
  1433. return 0;
  1434. }
/* Copy out the EBB state: ebbrr, ebbhr, bescr (3 contiguous longs). */
static int ebb_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	/* Build tests */
	BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
	BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));

	if (!cpu_has_feature(CPU_FTR_ARCH_207S))
		return -ENODEV;

	/* No EBB state to report until the task has used EBB. */
	if (!target->thread.used_ebb)
		return -ENODATA;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
			&target->thread.ebbrr, 0, 3 * sizeof(unsigned long));
}
/*
 * Import EBBRR, EBBHR and BESCR from the tracer, one SPR at a time so
 * pos/count track the thread_struct layout.
 */
static int ebb_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret = 0;

	/* Build tests: the three EBB SPRs must be adjacent in thread_struct. */
	BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
	BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));

	if (!cpu_has_feature(CPU_FTR_ARCH_207S))
		return -ENODEV;

	/*
	 * NOTE(review): this rejects the write when the task HAS used EBB,
	 * which reads as the inverse of the ebb_get() check above —
	 * confirm this is the intended semantic before changing it.
	 */
	if (target->thread.used_ebb)
		return -ENODATA;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.ebbrr, 0,
				 sizeof(unsigned long));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.ebbhr,
					 sizeof(unsigned long),
					 2 * sizeof(unsigned long));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.bescr,
					 2 * sizeof(unsigned long),
					 3 * sizeof(unsigned long));

	return ret;
}
  1475. static int pmu_active(struct task_struct *target,
  1476. const struct user_regset *regset)
  1477. {
  1478. if (!cpu_has_feature(CPU_FTR_ARCH_207S))
  1479. return -ENODEV;
  1480. return regset->n;
  1481. }
  1482. static int pmu_get(struct task_struct *target,
  1483. const struct user_regset *regset,
  1484. unsigned int pos, unsigned int count,
  1485. void *kbuf, void __user *ubuf)
  1486. {
  1487. /* Build tests */
  1488. BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
  1489. BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
  1490. BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
  1491. BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));
  1492. if (!cpu_has_feature(CPU_FTR_ARCH_207S))
  1493. return -ENODEV;
  1494. return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
  1495. &target->thread.siar, 0,
  1496. 5 * sizeof(unsigned long));
  1497. }
/*
 * Import SIAR, SDAR, SIER, MMCR2 and MMCR0 from the tracer, one SPR at
 * a time so pos/count stay in step with the thread_struct layout.
 */
static int pmu_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret = 0;

	/* Build tests: the five PMU SPRs must be adjacent in thread_struct. */
	BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
	BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
	BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
	BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));

	if (!cpu_has_feature(CPU_FTR_ARCH_207S))
		return -ENODEV;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.siar, 0,
				 sizeof(unsigned long));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.sdar,
					 sizeof(unsigned long),
					 2 * sizeof(unsigned long));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.sier,
					 2 * sizeof(unsigned long),
					 3 * sizeof(unsigned long));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.mmcr2,
					 3 * sizeof(unsigned long),
					 4 * sizeof(unsigned long));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.mmcr0,
					 4 * sizeof(unsigned long),
					 5 * sizeof(unsigned long));

	return ret;
}
  1532. #endif
  1533. #ifdef CONFIG_PPC_MEM_KEYS
  1534. static int pkey_active(struct task_struct *target,
  1535. const struct user_regset *regset)
  1536. {
  1537. if (!arch_pkeys_enabled())
  1538. return -ENODEV;
  1539. return regset->n;
  1540. }
  1541. static int pkey_get(struct task_struct *target,
  1542. const struct user_regset *regset,
  1543. unsigned int pos, unsigned int count,
  1544. void *kbuf, void __user *ubuf)
  1545. {
  1546. BUILD_BUG_ON(TSO(amr) + sizeof(unsigned long) != TSO(iamr));
  1547. BUILD_BUG_ON(TSO(iamr) + sizeof(unsigned long) != TSO(uamor));
  1548. if (!arch_pkeys_enabled())
  1549. return -ENODEV;
  1550. return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
  1551. &target->thread.amr, 0,
  1552. ELF_NPKEY * sizeof(unsigned long));
  1553. }
  1554. static int pkey_set(struct task_struct *target,
  1555. const struct user_regset *regset,
  1556. unsigned int pos, unsigned int count,
  1557. const void *kbuf, const void __user *ubuf)
  1558. {
  1559. u64 new_amr;
  1560. int ret;
  1561. if (!arch_pkeys_enabled())
  1562. return -ENODEV;
  1563. /* Only the AMR can be set from userspace */
  1564. if (pos != 0 || count != sizeof(new_amr))
  1565. return -EINVAL;
  1566. ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
  1567. &new_amr, 0, sizeof(new_amr));
  1568. if (ret)
  1569. return ret;
  1570. /* UAMOR determines which bits of the AMR can be set from userspace. */
  1571. target->thread.amr = (new_amr & target->thread.uamor) |
  1572. (target->thread.amr & ~target->thread.uamor);
  1573. return 0;
  1574. }
  1575. #endif /* CONFIG_PPC_MEM_KEYS */
/*
 * These are our native regset flavors.
 *
 * The enumerators index native_regsets[] (and, where present,
 * compat_regsets[]); conditional entries must stay in sync with the
 * #ifdefs on those tables.
 */
enum powerpc_regset {
	REGSET_GPR,
	REGSET_FPR,
#ifdef CONFIG_ALTIVEC
	REGSET_VMX,
#endif
#ifdef CONFIG_VSX
	REGSET_VSX,
#endif
#ifdef CONFIG_SPE
	REGSET_SPE,
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	REGSET_TM_CGPR,		/* TM checkpointed GPR registers */
	REGSET_TM_CFPR,		/* TM checkpointed FPR registers */
	REGSET_TM_CVMX,		/* TM checkpointed VMX registers */
	REGSET_TM_CVSX,		/* TM checkpointed VSX registers */
	REGSET_TM_SPR,		/* TM specific SPR registers */
	REGSET_TM_CTAR,		/* TM checkpointed TAR register */
	REGSET_TM_CPPR,		/* TM checkpointed PPR register */
	REGSET_TM_CDSCR,	/* TM checkpointed DSCR register */
#endif
#ifdef CONFIG_PPC64
	REGSET_PPR,		/* PPR register */
	REGSET_DSCR,		/* DSCR register */
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	REGSET_TAR,		/* TAR register */
	REGSET_EBB,		/* EBB registers */
	REGSET_PMR,		/* Performance Monitor Registers */
#endif
#ifdef CONFIG_PPC_MEM_KEYS
	REGSET_PKEY,		/* AMR register */
#endif
};
/*
 * Native regset table, indexed by enum powerpc_regset.  Each entry
 * binds one ELF core-note type to its element count, size/alignment
 * and get/set accessors; .active, where set, reports whether the
 * regset currently holds data for the task.
 */
static const struct user_regset native_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
		.size = sizeof(long), .align = sizeof(long),
		.get = gpr_get, .set = gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
		.size = sizeof(double), .align = sizeof(double),
		.get = fpr_get, .set = fpr_set
	},
#ifdef CONFIG_ALTIVEC
	[REGSET_VMX] = {
		.core_note_type = NT_PPC_VMX, .n = 34,
		.size = sizeof(vector128), .align = sizeof(vector128),
		.active = vr_active, .get = vr_get, .set = vr_set
	},
#endif
#ifdef CONFIG_VSX
	[REGSET_VSX] = {
		.core_note_type = NT_PPC_VSX, .n = 32,
		.size = sizeof(double), .align = sizeof(double),
		.active = vsr_active, .get = vsr_get, .set = vsr_set
	},
#endif
#ifdef CONFIG_SPE
	[REGSET_SPE] = {
		.core_note_type = NT_PPC_SPE, .n = 35,
		.size = sizeof(u32), .align = sizeof(u32),
		.active = evr_active, .get = evr_get, .set = evr_set
	},
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	[REGSET_TM_CGPR] = {
		.core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
		.size = sizeof(long), .align = sizeof(long),
		.active = tm_cgpr_active, .get = tm_cgpr_get, .set = tm_cgpr_set
	},
	[REGSET_TM_CFPR] = {
		.core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
		.size = sizeof(double), .align = sizeof(double),
		.active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set
	},
	[REGSET_TM_CVMX] = {
		.core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
		.size = sizeof(vector128), .align = sizeof(vector128),
		.active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set
	},
	[REGSET_TM_CVSX] = {
		.core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
		.size = sizeof(double), .align = sizeof(double),
		.active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set
	},
	[REGSET_TM_SPR] = {
		.core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set
	},
	[REGSET_TM_CTAR] = {
		.core_note_type = NT_PPC_TM_CTAR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = tm_tar_active, .get = tm_tar_get, .set = tm_tar_set
	},
	[REGSET_TM_CPPR] = {
		.core_note_type = NT_PPC_TM_CPPR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = tm_ppr_active, .get = tm_ppr_get, .set = tm_ppr_set
	},
	[REGSET_TM_CDSCR] = {
		.core_note_type = NT_PPC_TM_CDSCR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set
	},
#endif
#ifdef CONFIG_PPC64
	[REGSET_PPR] = {
		.core_note_type = NT_PPC_PPR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.get = ppr_get, .set = ppr_set
	},
	[REGSET_DSCR] = {
		.core_note_type = NT_PPC_DSCR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.get = dscr_get, .set = dscr_set
	},
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	[REGSET_TAR] = {
		.core_note_type = NT_PPC_TAR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.get = tar_get, .set = tar_set
	},
	[REGSET_EBB] = {
		.core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = ebb_active, .get = ebb_get, .set = ebb_set
	},
	[REGSET_PMR] = {
		.core_note_type = NT_PPC_PMU, .n = ELF_NPMU,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = pmu_active, .get = pmu_get, .set = pmu_set
	},
#endif
#ifdef CONFIG_PPC_MEM_KEYS
	[REGSET_PKEY] = {
		.core_note_type = NT_PPC_PKEY, .n = ELF_NPKEY,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = pkey_active, .get = pkey_get, .set = pkey_set
	},
#endif
};
/* Regset view advertised for native tasks (see task_user_regset_view()). */
static const struct user_regset_view user_ppc_native_view = {
	.name = UTS_MACHINE, .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
	.regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
};
  1729. #ifdef CONFIG_PPC64
  1730. #include <linux/compat.h>
/*
 * Copy the 64-bit register block out as an array of 32-bit (compat)
 * registers.
 *
 * @regs points at gpr[0] of the block to export.  The MSR slot is
 * synthesised via get_user_msr() rather than read from the array, and
 * everything past PT_REGS_COUNT is zero-filled.
 */
static int gpr32_get_common(struct task_struct *target,
			    const struct user_regset *regset,
			    unsigned int pos, unsigned int count,
			    void *kbuf, void __user *ubuf,
			    unsigned long *regs)
{
	compat_ulong_t *k = kbuf;
	compat_ulong_t __user *u = ubuf;
	compat_ulong_t reg;

	/* Work in units of one compat register from here on. */
	pos /= sizeof(reg);
	count /= sizeof(reg);

	/* Registers before PT_MSR come straight from the array. */
	if (kbuf)
		for (; count > 0 && pos < PT_MSR; --count)
			*k++ = regs[pos++];
	else
		for (; count > 0 && pos < PT_MSR; --count)
			if (__put_user((compat_ulong_t) regs[pos++], u++))
				return -EFAULT;

	/* The MSR slot gets the sanitised user-visible MSR value. */
	if (count > 0 && pos == PT_MSR) {
		reg = get_user_msr(target);
		if (kbuf)
			*k++ = reg;
		else if (__put_user(reg, u++))
			return -EFAULT;
		++pos;
		--count;
	}

	/* Remaining slots up to PT_REGS_COUNT again come from the array. */
	if (kbuf)
		for (; count > 0 && pos < PT_REGS_COUNT; --count)
			*k++ = regs[pos++];
	else
		for (; count > 0 && pos < PT_REGS_COUNT; --count)
			if (__put_user((compat_ulong_t) regs[pos++], u++))
				return -EFAULT;

	/* Convert back to byte units and zero-fill any remaining tail. */
	kbuf = k;
	ubuf = u;
	pos *= sizeof(reg);
	count *= sizeof(reg);
	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					PT_REGS_COUNT * sizeof(reg), -1);
}
/*
 * Write an array of 32-bit (compat) registers into the 64-bit register
 * block at @regs.
 *
 * PT_MSR and PT_TRAP are filtered through set_user_msr() and
 * set_user_trap() so only the user-modifiable bits change; slots
 * between PT_MAX_PUT_REG and PT_TRAP, and everything after PT_TRAP,
 * are accepted but ignored.
 */
static int gpr32_set_common(struct task_struct *target,
			    const struct user_regset *regset,
			    unsigned int pos, unsigned int count,
			    const void *kbuf, const void __user *ubuf,
			    unsigned long *regs)
{
	const compat_ulong_t *k = kbuf;
	const compat_ulong_t __user *u = ubuf;
	compat_ulong_t reg;

	/* Work in units of one compat register from here on. */
	pos /= sizeof(reg);
	count /= sizeof(reg);

	if (kbuf)
		for (; count > 0 && pos < PT_MSR; --count)
			regs[pos++] = *k++;
	else
		for (; count > 0 && pos < PT_MSR; --count) {
			if (__get_user(reg, u++))
				return -EFAULT;
			regs[pos++] = reg;
		}

	/* MSR writes are restricted to the user-modifiable bits. */
	if (count > 0 && pos == PT_MSR) {
		if (kbuf)
			reg = *k++;
		else if (__get_user(reg, u++))
			return -EFAULT;
		set_user_msr(target, reg);
		++pos;
		--count;
	}

	if (kbuf) {
		for (; count > 0 && pos <= PT_MAX_PUT_REG; --count)
			regs[pos++] = *k++;
		/* Slots after PT_MAX_PUT_REG up to PT_TRAP are read-only. */
		for (; count > 0 && pos < PT_TRAP; --count, ++pos)
			++k;
	} else {
		for (; count > 0 && pos <= PT_MAX_PUT_REG; --count) {
			if (__get_user(reg, u++))
				return -EFAULT;
			regs[pos++] = reg;
		}
		/* Skip read-only slots but still validate user access. */
		for (; count > 0 && pos < PT_TRAP; --count, ++pos)
			if (__get_user(reg, u++))
				return -EFAULT;
	}

	/* The trap word is filtered through set_user_trap(). */
	if (count > 0 && pos == PT_TRAP) {
		if (kbuf)
			reg = *k++;
		else if (__get_user(reg, u++))
			return -EFAULT;
		set_user_trap(target, reg);
		++pos;
		--count;
	}

	/* Convert back to byte units and ignore the remaining tail. */
	kbuf = k;
	ubuf = u;
	pos *= sizeof(reg);
	count *= sizeof(reg);
	return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					 (PT_TRAP + 1) * sizeof(reg), -1);
}
  1832. #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
  1833. static int tm_cgpr32_get(struct task_struct *target,
  1834. const struct user_regset *regset,
  1835. unsigned int pos, unsigned int count,
  1836. void *kbuf, void __user *ubuf)
  1837. {
  1838. return gpr32_get_common(target, regset, pos, count, kbuf, ubuf,
  1839. &target->thread.ckpt_regs.gpr[0]);
  1840. }
  1841. static int tm_cgpr32_set(struct task_struct *target,
  1842. const struct user_regset *regset,
  1843. unsigned int pos, unsigned int count,
  1844. const void *kbuf, const void __user *ubuf)
  1845. {
  1846. return gpr32_set_common(target, regset, pos, count, kbuf, ubuf,
  1847. &target->thread.ckpt_regs.gpr[0]);
  1848. }
  1849. #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
  1850. static int gpr32_get(struct task_struct *target,
  1851. const struct user_regset *regset,
  1852. unsigned int pos, unsigned int count,
  1853. void *kbuf, void __user *ubuf)
  1854. {
  1855. int i;
  1856. if (target->thread.regs == NULL)
  1857. return -EIO;
  1858. if (!FULL_REGS(target->thread.regs)) {
  1859. /*
  1860. * We have a partial register set.
  1861. * Fill 14-31 with bogus values.
  1862. */
  1863. for (i = 14; i < 32; i++)
  1864. target->thread.regs->gpr[i] = NV_REG_POISON;
  1865. }
  1866. return gpr32_get_common(target, regset, pos, count, kbuf, ubuf,
  1867. &target->thread.regs->gpr[0]);
  1868. }
  1869. static int gpr32_set(struct task_struct *target,
  1870. const struct user_regset *regset,
  1871. unsigned int pos, unsigned int count,
  1872. const void *kbuf, const void __user *ubuf)
  1873. {
  1874. if (target->thread.regs == NULL)
  1875. return -EIO;
  1876. CHECK_FULL_REGS(target->thread.regs);
  1877. return gpr32_set_common(target, regset, pos, count, kbuf, ubuf,
  1878. &target->thread.regs->gpr[0]);
  1879. }
/*
 * These are the regset flavors matching the CONFIG_PPC32 native set,
 * served to 32-bit tasks running on a 64-bit kernel.  The GPR entries
 * use the gpr32_* accessors to translate between the 64-bit pt_regs
 * and the 32-bit layout the tracer expects.
 */
static const struct user_regset compat_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
		.size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
		.get = gpr32_get, .set = gpr32_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
		.size = sizeof(double), .align = sizeof(double),
		.get = fpr_get, .set = fpr_set
	},
#ifdef CONFIG_ALTIVEC
	[REGSET_VMX] = {
		.core_note_type = NT_PPC_VMX, .n = 34,
		.size = sizeof(vector128), .align = sizeof(vector128),
		.active = vr_active, .get = vr_get, .set = vr_set
	},
#endif
#ifdef CONFIG_SPE
	[REGSET_SPE] = {
		.core_note_type = NT_PPC_SPE, .n = 35,
		.size = sizeof(u32), .align = sizeof(u32),
		.active = evr_active, .get = evr_get, .set = evr_set
	},
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	[REGSET_TM_CGPR] = {
		.core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
		.size = sizeof(long), .align = sizeof(long),
		.active = tm_cgpr_active,
		.get = tm_cgpr32_get, .set = tm_cgpr32_set
	},
	[REGSET_TM_CFPR] = {
		.core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
		.size = sizeof(double), .align = sizeof(double),
		.active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set
	},
	[REGSET_TM_CVMX] = {
		.core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
		.size = sizeof(vector128), .align = sizeof(vector128),
		.active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set
	},
	[REGSET_TM_CVSX] = {
		.core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
		.size = sizeof(double), .align = sizeof(double),
		.active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set
	},
	[REGSET_TM_SPR] = {
		.core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set
	},
	[REGSET_TM_CTAR] = {
		.core_note_type = NT_PPC_TM_CTAR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = tm_tar_active, .get = tm_tar_get, .set = tm_tar_set
	},
	[REGSET_TM_CPPR] = {
		.core_note_type = NT_PPC_TM_CPPR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = tm_ppr_active, .get = tm_ppr_get, .set = tm_ppr_set
	},
	[REGSET_TM_CDSCR] = {
		.core_note_type = NT_PPC_TM_CDSCR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set
	},
#endif
#ifdef CONFIG_PPC64
	[REGSET_PPR] = {
		.core_note_type = NT_PPC_PPR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.get = ppr_get, .set = ppr_set
	},
	[REGSET_DSCR] = {
		.core_note_type = NT_PPC_DSCR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.get = dscr_get, .set = dscr_set
	},
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	[REGSET_TAR] = {
		.core_note_type = NT_PPC_TAR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.get = tar_get, .set = tar_set
	},
	[REGSET_EBB] = {
		.core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = ebb_active, .get = ebb_get, .set = ebb_set
	},
#endif
};
/* Regset view served to 32-bit (compat) tasks on a 64-bit kernel. */
static const struct user_regset_view user_ppc_compat_view = {
	.name = "ppc", .e_machine = EM_PPC, .ei_osabi = ELF_OSABI,
	.regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets)
};
  1980. #endif /* CONFIG_PPC64 */
  1981. const struct user_regset_view *task_user_regset_view(struct task_struct *task)
  1982. {
  1983. #ifdef CONFIG_PPC64
  1984. if (test_tsk_thread_flag(task, TIF_32BIT))
  1985. return &user_ppc_compat_view;
  1986. #endif
  1987. return &user_ppc_native_view;
  1988. }
/*
 * Arm hardware single-stepping for @task; takes effect when the task
 * next returns to userspace.
 */
void user_enable_single_step(struct task_struct *task)
{
	struct pt_regs *regs = task->thread.regs;

	if (regs != NULL) {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
		/* BookE debug: instruction-complete event, not branch-taken. */
		task->thread.debug.dbcr0 &= ~DBCR0_BT;
		task->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
		regs->msr |= MSR_DE;
#else
		/* Classic: single-step (SE), not branch-trace (BE). */
		regs->msr &= ~MSR_BE;
		regs->msr |= MSR_SE;
#endif
	}
	set_tsk_thread_flag(task, TIF_SINGLESTEP);
}
  2004. void user_enable_block_step(struct task_struct *task)
  2005. {
  2006. struct pt_regs *regs = task->thread.regs;
  2007. if (regs != NULL) {
  2008. #ifdef CONFIG_PPC_ADV_DEBUG_REGS
  2009. task->thread.debug.dbcr0 &= ~DBCR0_IC;
  2010. task->thread.debug.dbcr0 = DBCR0_IDM | DBCR0_BT;
  2011. regs->msr |= MSR_DE;
  2012. #else
  2013. regs->msr &= ~MSR_SE;
  2014. regs->msr |= MSR_BE;
  2015. #endif
  2016. }
  2017. set_tsk_thread_flag(task, TIF_SINGLESTEP);
  2018. }
/*
 * Disarm single-step and branch-step for @task, dropping IDM/MSR_DE
 * once no other debug events remain armed.
 */
void user_disable_single_step(struct task_struct *task)
{
	struct pt_regs *regs = task->thread.regs;

	if (regs != NULL) {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
		/*
		 * The logic to disable single stepping should be as
		 * simple as turning off the Instruction Complete flag.
		 * And, after doing so, if all debug flags are off, turn
		 * off DBCR0(IDM) and MSR(DE) .... Torez
		 */
		task->thread.debug.dbcr0 &= ~(DBCR0_IC|DBCR0_BT);
		/*
		 * Test to see if any of the DBCR_ACTIVE_EVENTS bits are set.
		 */
		if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
					task->thread.debug.dbcr1)) {
			/*
			 * All debug events were off: leave internal debug
			 * mode entirely.
			 */
			task->thread.debug.dbcr0 &= ~DBCR0_IDM;
			regs->msr &= ~MSR_DE;
		}
#else
		regs->msr &= ~(MSR_SE | MSR_BE);
#endif
	}
	clear_tsk_thread_flag(task, TIF_SINGLESTEP);
}
  2048. #ifdef CONFIG_HAVE_HW_BREAKPOINT
  2049. void ptrace_triggered(struct perf_event *bp,
  2050. struct perf_sample_data *data, struct pt_regs *regs)
  2051. {
  2052. struct perf_event_attr attr;
  2053. /*
  2054. * Disable the breakpoint request here since ptrace has defined a
  2055. * one-shot behaviour for breakpoint exceptions in PPC64.
  2056. * The SIGTRAP signal is generated automatically for us in do_dabr().
  2057. * We don't have to do anything about that here
  2058. */
  2059. attr = bp->attr;
  2060. attr.disabled = true;
  2061. modify_user_hw_breakpoint(bp, &attr);
  2062. }
  2063. #endif /* CONFIG_HAVE_HW_BREAKPOINT */
/*
 * PTRACE_SET_DEBUGREG backend: program the single data breakpoint.
 *
 * @addr must be 0 (there is only one debug register slot).  The low 3
 * bits of @data are flags, the rest is the watch address; @data == 0
 * clears the breakpoint.  Returns 0 on success or a negative errno.
 */
static int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
			       unsigned long data)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	int ret;
	struct thread_struct *thread = &(task->thread);
	struct perf_event *bp;
	struct perf_event_attr attr;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#ifndef CONFIG_PPC_ADV_DEBUG_REGS
	struct arch_hw_breakpoint hw_brk;
#endif

	/* For ppc64 we support one DABR and no IABR's at the moment (ppc64).
	 * For embedded processors we support one DAC and no IAC's at the
	 * moment.
	 */
	if (addr > 0)
		return -EINVAL;

	/* The bottom 3 bits in dabr are flags */
	if ((data & ~0x7UL) >= TASK_SIZE)
		return -EIO;

#ifndef CONFIG_PPC_ADV_DEBUG_REGS
	/* For processors using DABR (i.e. 970), the bottom 3 bits are flags.
	 * It was assumed, on previous implementations, that 3 bits were
	 * passed together with the data address, fitting the design of the
	 * DABR register, as follows:
	 *
	 * bit 0: Read flag
	 * bit 1: Write flag
	 * bit 2: Breakpoint translation
	 *
	 * Thus, we use them here as so.
	 */

	/* Ensure breakpoint translation bit is set */
	if (data && !(data & HW_BRK_TYPE_TRANSLATE))
		return -EIO;
	hw_brk.address = data & (~HW_BRK_TYPE_DABR);
	hw_brk.type = (data & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL;
	hw_brk.len = 8;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	bp = thread->ptrace_bps[0];
	/*
	 * Clearing (data == 0) or requesting a breakpoint with neither
	 * read nor write set tears down any existing perf event.
	 */
	if ((!data) || !(hw_brk.type & HW_BRK_TYPE_RDWR)) {
		if (bp) {
			unregister_hw_breakpoint(bp);
			thread->ptrace_bps[0] = NULL;
		}
		return 0;
	}
	/* An event already exists: re-target and re-enable it in place. */
	if (bp) {
		attr = bp->attr;
		attr.bp_addr = hw_brk.address;
		arch_bp_generic_fields(hw_brk.type, &attr.bp_type);

		/* Enable breakpoint */
		attr.disabled = false;

		ret = modify_user_hw_breakpoint(bp, &attr);
		if (ret) {
			return ret;
		}
		thread->ptrace_bps[0] = bp;
		thread->hw_brk = hw_brk;
		return 0;
	}

	/* Create a new breakpoint request if one doesn't exist already */
	hw_breakpoint_init(&attr);
	attr.bp_addr = hw_brk.address;
	arch_bp_generic_fields(hw_brk.type,
			       &attr.bp_type);

	thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
					ptrace_triggered, NULL, task);
	if (IS_ERR(bp)) {
		thread->ptrace_bps[0] = NULL;
		return PTR_ERR(bp);
	}

#endif /* CONFIG_HAVE_HW_BREAKPOINT */
	task->thread.hw_brk = hw_brk;
#else /* CONFIG_PPC_ADV_DEBUG_REGS */
	/* As described above, it was assumed 3 bits were passed with the data
	 * address, but we will assume only the mode bits will be passed
	 * as to not cause alignment restrictions for DAC-based processors.
	 */

	/* DAC's hold the whole address without any mode flags */
	task->thread.debug.dac1 = data & ~0x3UL;

	if (task->thread.debug.dac1 == 0) {
		/* Clearing DAC1; drop IDM/MSR_DE if nothing else is armed. */
		dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W);
		if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
					task->thread.debug.dbcr1)) {
			task->thread.regs->msr &= ~MSR_DE;
			task->thread.debug.dbcr0 &= ~DBCR0_IDM;
		}
		return 0;
	}

	/* Read or Write bits must be set */

	if (!(data & 0x3UL))
		return -EINVAL;

	/* Set the Internal Debugging flag (IDM bit 1) for the DBCR0
	   register */
	task->thread.debug.dbcr0 |= DBCR0_IDM;

	/* Check for write and read flags and set DBCR0
	   accordingly */
	dbcr_dac(task) &= ~(DBCR_DAC1R|DBCR_DAC1W);
	if (data & 0x1UL)
		dbcr_dac(task) |= DBCR_DAC1R;
	if (data & 0x2UL)
		dbcr_dac(task) |= DBCR_DAC1W;
	task->thread.regs->msr |= MSR_DE;
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
	return 0;
}
  2172. /*
  2173. * Called by kernel/ptrace.c when detaching..
  2174. *
  2175. * Make sure single step bits etc are not set.
  2176. */
  2177. void ptrace_disable(struct task_struct *child)
  2178. {
  2179. /* make sure the single step bit is not set. */
  2180. user_disable_single_step(child);
  2181. }
  2182. #ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * Install an instruction (IAC) hardware breakpoint for @child.
 *
 * Exact-address breakpoints take a single IAC slot; range breakpoints
 * consume an adjacent pair (IAC1+2 or IAC3+4).  Returns the 1-based
 * slot number on success, -ENOSPC when no suitable slot is free, or
 * -EIO for non-user addresses.
 */
static long set_instruction_bp(struct task_struct *child,
			       struct ppc_hw_breakpoint *bp_info)
{
	int slot;
	int slot1_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC1) != 0);
	int slot2_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC2) != 0);
	int slot3_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC3) != 0);
	int slot4_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC4) != 0);

	/* An active range mode ties up both registers of the pair. */
	if (dbcr_iac_range(child) & DBCR_IAC12MODE)
		slot2_in_use = 1;
	if (dbcr_iac_range(child) & DBCR_IAC34MODE)
		slot4_in_use = 1;

	if (bp_info->addr >= TASK_SIZE)
		return -EIO;

	if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) {
		/* Make sure range is valid. */
		if (bp_info->addr2 >= TASK_SIZE)
			return -EIO;

		/* We need a pair of IAC regsisters */
		if ((!slot1_in_use) && (!slot2_in_use)) {
			slot = 1;
			child->thread.debug.iac1 = bp_info->addr;
			child->thread.debug.iac2 = bp_info->addr2;
			child->thread.debug.dbcr0 |= DBCR0_IAC1;
			if (bp_info->addr_mode ==
					PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
				dbcr_iac_range(child) |= DBCR_IAC12X;
			else
				dbcr_iac_range(child) |= DBCR_IAC12I;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
		} else if ((!slot3_in_use) && (!slot4_in_use)) {
			slot = 3;
			child->thread.debug.iac3 = bp_info->addr;
			child->thread.debug.iac4 = bp_info->addr2;
			child->thread.debug.dbcr0 |= DBCR0_IAC3;
			if (bp_info->addr_mode ==
					PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
				dbcr_iac_range(child) |= DBCR_IAC34X;
			else
				dbcr_iac_range(child) |= DBCR_IAC34I;
#endif
		} else
			return -ENOSPC;
	} else {
		/* We only need one.  If possible leave a pair free in
		 * case a range is needed later
		 */
		if (!slot1_in_use) {
			/*
			 * Don't use iac1 if iac1-iac2 are free and either
			 * iac3 or iac4 (but not both) are free
			 */
			if (slot2_in_use || (slot3_in_use == slot4_in_use)) {
				slot = 1;
				child->thread.debug.iac1 = bp_info->addr;
				child->thread.debug.dbcr0 |= DBCR0_IAC1;
				goto out;
			}
		}
		if (!slot2_in_use) {
			slot = 2;
			child->thread.debug.iac2 = bp_info->addr;
			child->thread.debug.dbcr0 |= DBCR0_IAC2;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
		} else if (!slot3_in_use) {
			slot = 3;
			child->thread.debug.iac3 = bp_info->addr;
			child->thread.debug.dbcr0 |= DBCR0_IAC3;
		} else if (!slot4_in_use) {
			slot = 4;
			child->thread.debug.iac4 = bp_info->addr;
			child->thread.debug.dbcr0 |= DBCR0_IAC4;
#endif
		} else
			return -ENOSPC;
	}
out:
	/* Arm internal debug mode and enable debug interrupts. */
	child->thread.debug.dbcr0 |= DBCR0_IDM;
	child->thread.regs->msr |= MSR_DE;

	return slot;
}
/*
 * Remove the instruction breakpoint in IAC @slot.  Deleting the first
 * register of a range pair tears down the whole range; deleting the
 * second register of an active range is rejected with -EINVAL.
 * Returns -ENOENT when the slot is not armed.
 */
static int del_instruction_bp(struct task_struct *child, int slot)
{
	switch (slot) {
	case 1:
		if ((child->thread.debug.dbcr0 & DBCR0_IAC1) == 0)
			return -ENOENT;

		if (dbcr_iac_range(child) & DBCR_IAC12MODE) {
			/* address range - clear slots 1 & 2 */
			child->thread.debug.iac2 = 0;
			dbcr_iac_range(child) &= ~DBCR_IAC12MODE;
		}
		child->thread.debug.iac1 = 0;
		child->thread.debug.dbcr0 &= ~DBCR0_IAC1;
		break;
	case 2:
		if ((child->thread.debug.dbcr0 & DBCR0_IAC2) == 0)
			return -ENOENT;

		if (dbcr_iac_range(child) & DBCR_IAC12MODE)
			/* used in a range */
			return -EINVAL;
		child->thread.debug.iac2 = 0;
		child->thread.debug.dbcr0 &= ~DBCR0_IAC2;
		break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	case 3:
		if ((child->thread.debug.dbcr0 & DBCR0_IAC3) == 0)
			return -ENOENT;

		if (dbcr_iac_range(child) & DBCR_IAC34MODE) {
			/* address range - clear slots 3 & 4 */
			child->thread.debug.iac4 = 0;
			dbcr_iac_range(child) &= ~DBCR_IAC34MODE;
		}
		child->thread.debug.iac3 = 0;
		child->thread.debug.dbcr0 &= ~DBCR0_IAC3;
		break;
	case 4:
		if ((child->thread.debug.dbcr0 & DBCR0_IAC4) == 0)
			return -ENOENT;

		if (dbcr_iac_range(child) & DBCR_IAC34MODE)
			/* Used in a range */
			return -EINVAL;
		child->thread.debug.iac4 = 0;
		child->thread.debug.dbcr0 &= ~DBCR0_IAC4;
		break;
#endif
	default:
		return -EINVAL;
	}
	return 0;
}
/*
 * Install a data (DAC) hardware watchpoint for @child, optionally with
 * a data-value-compare (DVC) condition where the hardware provides DVC
 * registers.  Returns slot + 4 (i.e. 5 or 6) so DAC slot numbers don't
 * collide with IAC slots, -ENOSPC when both DACs are busy, or -EIO for
 * non-user addresses.
 */
static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
{
	int byte_enable =
		(bp_info->condition_mode >> PPC_BREAKPOINT_CONDITION_BE_SHIFT)
		& 0xf;
	int condition_mode =
		bp_info->condition_mode & PPC_BREAKPOINT_CONDITION_MODE;
	int slot;

	/* Byte enables are meaningless without a condition mode. */
	if (byte_enable && (condition_mode == 0))
		return -EINVAL;

	if (bp_info->addr >= TASK_SIZE)
		return -EIO;

	if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0) {
		slot = 1;
		if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
			dbcr_dac(child) |= DBCR_DAC1R;
		if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
			dbcr_dac(child) |= DBCR_DAC1W;
		child->thread.debug.dac1 = (unsigned long)bp_info->addr;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
		if (byte_enable) {
			child->thread.debug.dvc1 =
				(unsigned long)bp_info->condition_value;
			child->thread.debug.dbcr2 |=
				((byte_enable << DBCR2_DVC1BE_SHIFT) |
				 (condition_mode << DBCR2_DVC1M_SHIFT));
		}
#endif
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
	} else if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
		/* Both dac1 and dac2 are part of a range */
		return -ENOSPC;
#endif
	} else if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0) {
		slot = 2;
		if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
			dbcr_dac(child) |= DBCR_DAC2R;
		if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
			dbcr_dac(child) |= DBCR_DAC2W;
		child->thread.debug.dac2 = (unsigned long)bp_info->addr;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
		if (byte_enable) {
			child->thread.debug.dvc2 =
				(unsigned long)bp_info->condition_value;
			child->thread.debug.dbcr2 |=
				((byte_enable << DBCR2_DVC2BE_SHIFT) |
				 (condition_mode << DBCR2_DVC2M_SHIFT));
		}
#endif
	} else
		return -ENOSPC;

	/* Arm internal debug mode and enable debug interrupts. */
	child->thread.debug.dbcr0 |= DBCR0_IDM;
	child->thread.regs->msr |= MSR_DE;

	return slot + 4;
}
/*
 * Remove the DAC watchpoint in @slot (1 or 2) from @child.
 *
 * Clears the address register, the trigger bits in the shadow DBCR
 * and, where present, the associated DVC condition state.  Deleting
 * slot 1 also tears down a DAC1/DAC2 range pairing; deleting slot 2
 * while it is half of a range is rejected (delete via slot 1 instead).
 *
 * Returns 0 on success, -ENOENT if the slot is not armed, -EINVAL on
 * a bad slot or a slot that is part of a range.
 */
static int del_dac(struct task_struct *child, int slot)
{
	if (slot == 1) {
		if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0)
			return -ENOENT;

		child->thread.debug.dac1 = 0;
		dbcr_dac(child) &= ~(DBCR_DAC1R | DBCR_DAC1W);
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
		/* A range consumes both DACs; clear the partner too. */
		if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
			child->thread.debug.dac2 = 0;
			child->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
		}
		child->thread.debug.dbcr2 &= ~(DBCR2_DVC1M | DBCR2_DVC1BE);
#endif
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
		child->thread.debug.dvc1 = 0;
#endif
	} else if (slot == 2) {
		if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0)
			return -ENOENT;

#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
		if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE)
			/* Part of a range */
			return -EINVAL;
		child->thread.debug.dbcr2 &= ~(DBCR2_DVC2M | DBCR2_DVC2BE);
#endif
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
		child->thread.debug.dvc2 = 0;
#endif
		child->thread.debug.dac2 = 0;
		dbcr_dac(child) &= ~(DBCR_DAC2R | DBCR_DAC2W);
	} else
		return -EINVAL;

	return 0;
}
  2404. #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
  2405. #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
  2406. static int set_dac_range(struct task_struct *child,
  2407. struct ppc_hw_breakpoint *bp_info)
  2408. {
  2409. int mode = bp_info->addr_mode & PPC_BREAKPOINT_MODE_MASK;
  2410. /* We don't allow range watchpoints to be used with DVC */
  2411. if (bp_info->condition_mode)
  2412. return -EINVAL;
  2413. /*
  2414. * Best effort to verify the address range. The user/supervisor bits
  2415. * prevent trapping in kernel space, but let's fail on an obvious bad
  2416. * range. The simple test on the mask is not fool-proof, and any
  2417. * exclusive range will spill over into kernel space.
  2418. */
  2419. if (bp_info->addr >= TASK_SIZE)
  2420. return -EIO;
  2421. if (mode == PPC_BREAKPOINT_MODE_MASK) {
  2422. /*
  2423. * dac2 is a bitmask. Don't allow a mask that makes a
  2424. * kernel space address from a valid dac1 value
  2425. */
  2426. if (~((unsigned long)bp_info->addr2) >= TASK_SIZE)
  2427. return -EIO;
  2428. } else {
  2429. /*
  2430. * For range breakpoints, addr2 must also be a valid address
  2431. */
  2432. if (bp_info->addr2 >= TASK_SIZE)
  2433. return -EIO;
  2434. }
  2435. if (child->thread.debug.dbcr0 &
  2436. (DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W))
  2437. return -ENOSPC;
  2438. if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
  2439. child->thread.debug.dbcr0 |= (DBCR0_DAC1R | DBCR0_IDM);
  2440. if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
  2441. child->thread.debug.dbcr0 |= (DBCR0_DAC1W | DBCR0_IDM);
  2442. child->thread.debug.dac1 = bp_info->addr;
  2443. child->thread.debug.dac2 = bp_info->addr2;
  2444. if (mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
  2445. child->thread.debug.dbcr2 |= DBCR2_DAC12M;
  2446. else if (mode == PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
  2447. child->thread.debug.dbcr2 |= DBCR2_DAC12MX;
  2448. else /* PPC_BREAKPOINT_MODE_MASK */
  2449. child->thread.debug.dbcr2 |= DBCR2_DAC12MM;
  2450. child->thread.regs->msr |= MSR_DE;
  2451. return 5;
  2452. }
  2453. #endif /* CONFIG_PPC_ADV_DEBUG_DAC_RANGE */
/*
 * Service PPC_PTRACE_SETHWDEBUG: install the hardware breakpoint or
 * watchpoint described by @bp_info on @child.
 *
 * Returns a positive handle (later passed to PPC_PTRACE_DELHWDEBUG /
 * ppc_del_hwdebug()) on success, or a negative errno.  The handle
 * space depends on the hardware: IAC slots 1-4 and DAC slots 5-6 on
 * BookE-style (CONFIG_PPC_ADV_DEBUG_REGS) parts, otherwise always 1
 * for the single DABR-style data breakpoint.
 */
static long ppc_set_hwdebug(struct task_struct *child,
			    struct ppc_hw_breakpoint *bp_info)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	int len = 0;
	struct thread_struct *thread = &(child->thread);
	struct perf_event *bp;
	struct perf_event_attr attr;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#ifndef CONFIG_PPC_ADV_DEBUG_REGS
	struct arch_hw_breakpoint brk;
#endif

	/* Only version 1 of the user-visible struct is understood. */
	if (bp_info->version != 1)
		return -ENOTSUPP;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	/*
	 * Check for invalid flags and combinations
	 */
	if ((bp_info->trigger_type == 0) ||
	    (bp_info->trigger_type & ~(PPC_BREAKPOINT_TRIGGER_EXECUTE |
				       PPC_BREAKPOINT_TRIGGER_RW)) ||
	    (bp_info->addr_mode & ~PPC_BREAKPOINT_MODE_MASK) ||
	    (bp_info->condition_mode &
	     ~(PPC_BREAKPOINT_CONDITION_MODE |
	       PPC_BREAKPOINT_CONDITION_BE_ALL)))
		return -EINVAL;
#if CONFIG_PPC_ADV_DEBUG_DVCS == 0
	/* No DVC registers: data-value conditions cannot be honoured. */
	if (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
		return -EINVAL;
#endif

	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_EXECUTE) {
		/* Instruction breakpoints are execute-only, unconditional. */
		if ((bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_EXECUTE) ||
		    (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE))
			return -EINVAL;
		return set_instruction_bp(child, bp_info);
	}
	if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
		return set_dac(child, bp_info);

#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
	return set_dac_range(child, bp_info);
#else
	return -EINVAL;
#endif
#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
	/*
	 * We only support one data breakpoint
	 */
	if ((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0 ||
	    (bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0 ||
	    bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
		return -EINVAL;

	if ((unsigned long)bp_info->addr >= TASK_SIZE)
		return -EIO;

	/* Translate the request into an arch_hw_breakpoint. */
	brk.address = bp_info->addr & ~7UL;
	brk.type = HW_BRK_TYPE_TRANSLATE;
	brk.len = 8;
	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
		brk.type |= HW_BRK_TYPE_READ;
	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
		brk.type |= HW_BRK_TYPE_WRITE;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	/*
	 * Check if the request is for 'range' breakpoints. We can
	 * support it if range < 8 bytes.
	 */
	if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
		len = bp_info->addr2 - bp_info->addr;
	else if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
		len = 1;
	else
		return -EINVAL;

	/* Only one ptrace breakpoint slot is available. */
	bp = thread->ptrace_bps[0];
	if (bp)
		return -ENOSPC;

	/* Create a new breakpoint request if one doesn't exist already */
	hw_breakpoint_init(&attr);
	attr.bp_addr = (unsigned long)bp_info->addr & ~HW_BREAKPOINT_ALIGN;
	attr.bp_len = len;
	arch_bp_generic_fields(brk.type, &attr.bp_type);

	thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
					       ptrace_triggered, NULL, child);
	if (IS_ERR(bp)) {
		/* Registration failed; leave the slot free. */
		thread->ptrace_bps[0] = NULL;
		return PTR_ERR(bp);
	}

	return 1;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

	/* Legacy DABR path: exact-address watchpoints only, one slot. */
	if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT)
		return -EINVAL;

	if (child->thread.hw_brk.address)
		return -ENOSPC;

	child->thread.hw_brk = brk;

	return 1;
#endif /* !CONFIG_PPC_ADV_DEBUG_REGS */
}
/*
 * Service PPC_PTRACE_DELHWDEBUG: remove the breakpoint identified by
 * the handle @data that ppc_set_hwdebug() previously returned.
 *
 * Returns 0 on success or a negative errno.
 */
static long ppc_del_hwdebug(struct task_struct *child, long data)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	int ret = 0;
	struct thread_struct *thread = &(child->thread);
	struct perf_event *bp;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	int rc;

	/* Handles 1-4 are IAC slots; 5 and up are DAC slots (biased by 4). */
	if (data <= 4)
		rc = del_instruction_bp(child, (int)data);
	else
		rc = del_dac(child, (int)data - 4);

	if (!rc) {
		/* Drop IDM and MSR_DE once no debug events remain armed. */
		if (!DBCR_ACTIVE_EVENTS(child->thread.debug.dbcr0,
					child->thread.debug.dbcr1)) {
			child->thread.debug.dbcr0 &= ~DBCR0_IDM;
			child->thread.regs->msr &= ~MSR_DE;
		}
	}
	return rc;
#else
	/* Only one data breakpoint is supported, so the handle must be 1. */
	if (data != 1)
		return -EINVAL;

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	bp = thread->ptrace_bps[0];
	if (bp) {
		unregister_hw_breakpoint(bp);
		thread->ptrace_bps[0] = NULL;
	} else
		ret = -ENOENT;
	return ret;
#else /* CONFIG_HAVE_HW_BREAKPOINT */
	if (child->thread.hw_brk.address == 0)
		return -ENOENT;

	child->thread.hw_brk.address = 0;
	child->thread.hw_brk.type = 0;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

	return 0;
#endif
}
/*
 * PowerPC-specific ptrace request dispatcher.
 *
 * Handles the arch-specific subset of ptrace requests (USER-area
 * peek/poke, hardware debug registers, and the various register-set
 * transfers); everything else falls through to the generic
 * ptrace_request().  Returns 0 / a requested value on success or a
 * negative errno.
 */
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret = -EPERM;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = datavp;

	switch (request) {
	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long index, tmp;

		ret = -EIO;
		/* convert to index and check */
#ifdef CONFIG_PPC32
		index = addr >> 2;
		if ((addr & 3) || (index > PT_FPSCR)
		    || (child->thread.regs == NULL))
#else
		index = addr >> 3;
		if ((addr & 7) || (index > PT_FPSCR))
#endif
			break;

		CHECK_FULL_REGS(child->thread.regs);
		if (index < PT_FPR0) {
			/* GPR / special-purpose register slot. */
			ret = ptrace_get_reg(child, (int) index, &tmp);
			if (ret)
				break;
		} else {
			/* FP register slot: flush live FP state first. */
			unsigned int fpidx = index - PT_FPR0;

			flush_fp_to_thread(child);
			if (fpidx < (PT_FPSCR - PT_FPR0))
				memcpy(&tmp, &child->thread.TS_FPR(fpidx),
				       sizeof(long));
			else
				tmp = child->thread.fp_state.fpscr;
		}
		ret = put_user(tmp, datalp);
		break;
	}

	/* write the word at location addr in the USER area */
	case PTRACE_POKEUSR: {
		unsigned long index;

		ret = -EIO;
		/* convert to index and check */
#ifdef CONFIG_PPC32
		index = addr >> 2;
		if ((addr & 3) || (index > PT_FPSCR)
		    || (child->thread.regs == NULL))
#else
		index = addr >> 3;
		if ((addr & 7) || (index > PT_FPSCR))
#endif
			break;

		CHECK_FULL_REGS(child->thread.regs);
		if (index < PT_FPR0) {
			ret = ptrace_put_reg(child, index, data);
		} else {
			/* FP register slot: flush live FP state first. */
			unsigned int fpidx = index - PT_FPR0;

			flush_fp_to_thread(child);
			if (fpidx < (PT_FPSCR - PT_FPR0))
				memcpy(&child->thread.TS_FPR(fpidx), &data,
				       sizeof(long));
			else
				child->thread.fp_state.fpscr = data;
			ret = 0;
		}
		break;
	}

	case PPC_PTRACE_GETHWDBGINFO: {
		/* Describe the hardware debug facilities to userspace. */
		struct ppc_debug_info dbginfo;

		dbginfo.version = 1;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
		dbginfo.num_instruction_bps = CONFIG_PPC_ADV_DEBUG_IACS;
		dbginfo.num_data_bps = CONFIG_PPC_ADV_DEBUG_DACS;
		dbginfo.num_condition_regs = CONFIG_PPC_ADV_DEBUG_DVCS;
		dbginfo.data_bp_alignment = 4;
		dbginfo.sizeof_condition = 4;
		dbginfo.features = PPC_DEBUG_FEATURE_INSN_BP_RANGE |
				   PPC_DEBUG_FEATURE_INSN_BP_MASK;
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
		dbginfo.features |=
				   PPC_DEBUG_FEATURE_DATA_BP_RANGE |
				   PPC_DEBUG_FEATURE_DATA_BP_MASK;
#endif
#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
		dbginfo.num_instruction_bps = 0;
		dbginfo.num_data_bps = 1;
		dbginfo.num_condition_regs = 0;
#ifdef CONFIG_PPC64
		dbginfo.data_bp_alignment = 8;
#else
		dbginfo.data_bp_alignment = 4;
#endif
		dbginfo.sizeof_condition = 0;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
		dbginfo.features = PPC_DEBUG_FEATURE_DATA_BP_RANGE;
		if (cpu_has_feature(CPU_FTR_DAWR))
			dbginfo.features |= PPC_DEBUG_FEATURE_DATA_BP_DAWR;
#else
		dbginfo.features = 0;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */

		if (!access_ok(VERIFY_WRITE, datavp,
			       sizeof(struct ppc_debug_info)))
			return -EFAULT;
		ret = __copy_to_user(datavp, &dbginfo,
				     sizeof(struct ppc_debug_info)) ?
		      -EFAULT : 0;
		break;
	}

	case PPC_PTRACE_SETHWDEBUG: {
		struct ppc_hw_breakpoint bp_info;

		if (!access_ok(VERIFY_READ, datavp,
			       sizeof(struct ppc_hw_breakpoint)))
			return -EFAULT;
		ret = __copy_from_user(&bp_info, datavp,
				       sizeof(struct ppc_hw_breakpoint)) ?
		      -EFAULT : 0;
		if (!ret)
			ret = ppc_set_hwdebug(child, &bp_info);
		break;
	}

	case PPC_PTRACE_DELHWDEBUG: {
		ret = ppc_del_hwdebug(child, data);
		break;
	}

	case PTRACE_GET_DEBUGREG: {
#ifndef CONFIG_PPC_ADV_DEBUG_REGS
		unsigned long dabr_fake;
#endif
		ret = -EINVAL;
		/* We only support one DABR and no IABRS at the moment */
		if (addr > 0)
			break;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
		ret = put_user(child->thread.debug.dac1, datalp);
#else
		/* Synthesize a DABR-style value from the hw_brk state. */
		dabr_fake = ((child->thread.hw_brk.address & (~HW_BRK_TYPE_DABR)) |
			     (child->thread.hw_brk.type & HW_BRK_TYPE_DABR));
		ret = put_user(dabr_fake, datalp);
#endif
		break;
	}

	case PTRACE_SET_DEBUGREG:
		ret = ptrace_set_debugreg(child, addr, data);
		break;

#ifdef CONFIG_PPC64
	case PTRACE_GETREGS64:
#endif
	case PTRACE_GETREGS:	/* Get all pt_regs from the child. */
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_GPR,
					   0, sizeof(struct pt_regs),
					   datavp);

#ifdef CONFIG_PPC64
	case PTRACE_SETREGS64:
#endif
	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_GPR,
					     0, sizeof(struct pt_regs),
					     datavp);

	case PTRACE_GETFPREGS: /* Get the child FPU state (FPR0...31 + FPSCR) */
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_FPR,
					   0, sizeof(elf_fpregset_t),
					   datavp);

	case PTRACE_SETFPREGS: /* Set the child FPU state (FPR0...31 + FPSCR) */
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_FPR,
					     0, sizeof(elf_fpregset_t),
					     datavp);

#ifdef CONFIG_ALTIVEC
	case PTRACE_GETVRREGS:
		/* 32 vector regs + VSCR + VRSAVE. */
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_VMX,
					   0, (33 * sizeof(vector128) +
					       sizeof(u32)),
					   datavp);

	case PTRACE_SETVRREGS:
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_VMX,
					     0, (33 * sizeof(vector128) +
						 sizeof(u32)),
					     datavp);
#endif
#ifdef CONFIG_VSX
	case PTRACE_GETVSRREGS:
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_VSX,
					   0, 32 * sizeof(double),
					   datavp);

	case PTRACE_SETVSRREGS:
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_VSX,
					     0, 32 * sizeof(double),
					     datavp);
#endif
#ifdef CONFIG_SPE
	case PTRACE_GETEVRREGS:
		/* Get the child spe register state. */
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_SPE, 0, 35 * sizeof(u32),
					   datavp);

	case PTRACE_SETEVRREGS:
		/* Set the child spe register state. */
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_SPE, 0, 35 * sizeof(u32),
					     datavp);
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}
	return ret;
}
  2805. #ifdef CONFIG_SECCOMP
  2806. static int do_seccomp(struct pt_regs *regs)
  2807. {
  2808. if (!test_thread_flag(TIF_SECCOMP))
  2809. return 0;
  2810. /*
  2811. * The ABI we present to seccomp tracers is that r3 contains
  2812. * the syscall return value and orig_gpr3 contains the first
  2813. * syscall parameter. This is different to the ptrace ABI where
  2814. * both r3 and orig_gpr3 contain the first syscall parameter.
  2815. */
  2816. regs->gpr[3] = -ENOSYS;
  2817. /*
  2818. * We use the __ version here because we have already checked
  2819. * TIF_SECCOMP. If this fails, there is nothing left to do, we
  2820. * have already loaded -ENOSYS into r3, or seccomp has put
  2821. * something else in r3 (via SECCOMP_RET_ERRNO/TRACE).
  2822. */
  2823. if (__secure_computing(NULL))
  2824. return -1;
  2825. /*
  2826. * The syscall was allowed by seccomp, restore the register
  2827. * state to what audit expects.
  2828. * Note that we use orig_gpr3, which means a seccomp tracer can
  2829. * modify the first syscall parameter (in orig_gpr3) and also
  2830. * allow the syscall to proceed.
  2831. */
  2832. regs->gpr[3] = regs->orig_gpr3;
  2833. return 0;
  2834. }
  2835. #else
  2836. static inline int do_seccomp(struct pt_regs *regs) { return 0; }
  2837. #endif /* CONFIG_SECCOMP */
/**
 * do_syscall_trace_enter() - Do syscall tracing on kernel entry.
 * @regs: the pt_regs of the task to trace (current)
 *
 * Performs various types of tracing on syscall entry. This includes seccomp,
 * ptrace, syscall tracepoints and audit.
 *
 * The pt_regs are potentially visible to userspace via ptrace, so their
 * contents is ABI.
 *
 * One or more of the tracers may modify the contents of pt_regs, in particular
 * to modify arguments or even the syscall number itself.
 *
 * It's also possible that a tracer can choose to reject the system call. In
 * that case this function will return an illegal syscall number, and will put
 * an appropriate return value in regs->r3.
 *
 * Return: the (possibly changed) syscall number.
 */
long do_syscall_trace_enter(struct pt_regs *regs)
{
	/* Tell context tracking we are entering the kernel. */
	user_exit();

	/*
	 * The tracer may decide to abort the syscall, if so tracehook
	 * will return !0. Note that the tracer may also just change
	 * regs->gpr[0] to an invalid syscall number, that is handled
	 * below on the exit path.
	 */
	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    tracehook_report_syscall_entry(regs))
		goto skip;

	/* Run seccomp after ptrace; allow it to set gpr[3]. */
	if (do_seccomp(regs))
		return -1;

	/* Avoid trace and audit when syscall is invalid. */
	if (regs->gpr[0] >= NR_syscalls)
		goto skip;

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->gpr[0]);

#ifdef CONFIG_PPC64
	if (!is_32bit_task())
		audit_syscall_entry(regs->gpr[0], regs->gpr[3], regs->gpr[4],
				    regs->gpr[5], regs->gpr[6]);
	else
#endif
		/* 32-bit task: audit only the low 32 bits of each argument. */
		audit_syscall_entry(regs->gpr[0],
				    regs->gpr[3] & 0xffffffff,
				    regs->gpr[4] & 0xffffffff,
				    regs->gpr[5] & 0xffffffff,
				    regs->gpr[6] & 0xffffffff);

	/* Return the possibly modified but valid syscall number */
	return regs->gpr[0];

skip:
	/*
	 * If we are aborting explicitly, or if the syscall number is
	 * now invalid, set the return value to -ENOSYS.
	 */
	regs->gpr[3] = -ENOSYS;
	return -1;
}
  2898. void do_syscall_trace_leave(struct pt_regs *regs)
  2899. {
  2900. int step;
  2901. audit_syscall_exit(regs);
  2902. if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
  2903. trace_sys_exit(regs, regs->result);
  2904. step = test_thread_flag(TIF_SINGLESTEP);
  2905. if (step || test_thread_flag(TIF_SYSCALL_TRACE))
  2906. tracehook_report_syscall_exit(regs, step);
  2907. user_enter();
  2908. }