kprobes.c

  1. /*
  2. * Kernel Probes (KProbes)
  3. * kernel/kprobes.c
  4. *
  5. * This program is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU General Public License as published by
  7. * the Free Software Foundation; either version 2 of the License, or
  8. * (at your option) any later version.
  9. *
  10. * This program is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  13. * GNU General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU General Public License
  16. * along with this program; if not, write to the Free Software
  17. * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  18. *
  19. * Copyright (C) IBM Corporation, 2002, 2004
  20. *
  21. * 2002-Oct Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
  22. * Probes initial implementation (includes suggestions from
  23. * Rusty Russell).
  24. * 2004-Aug Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
  25. * hlists and exceptions notifier as suggested by Andi Kleen.
  26. * 2004-July Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
  27. * interface to access function arguments.
  28. * 2004-Sep Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
  29. * exceptions notifier to be first on the priority list.
  30. * 2005-May Hien Nguyen <hien@us.ibm.com>, Jim Keniston
  31. * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
  32. * <prasanna@in.ibm.com> added function-return probes.
  33. */
  34. #include <linux/kprobes.h>
  35. #include <linux/hash.h>
  36. #include <linux/init.h>
  37. #include <linux/slab.h>
  38. #include <linux/stddef.h>
  39. #include <linux/export.h>
  40. #include <linux/moduleloader.h>
  41. #include <linux/kallsyms.h>
  42. #include <linux/freezer.h>
  43. #include <linux/seq_file.h>
  44. #include <linux/debugfs.h>
  45. #include <linux/sysctl.h>
  46. #include <linux/kdebug.h>
  47. #include <linux/memory.h>
  48. #include <linux/ftrace.h>
  49. #include <linux/cpu.h>
  50. #include <linux/jump_label.h>
  51. #include <asm/sections.h>
  52. #include <asm/cacheflush.h>
  53. #include <asm/errno.h>
  54. #include <linux/uaccess.h>
  55. #define KPROBE_HASH_BITS 6
  56. #define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
  57. static int kprobes_initialized;
  58. static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
  59. static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
  60. /* NOTE: change this value only with kprobe_mutex held */
  61. static bool kprobes_all_disarmed;
  62. /* This protects kprobe_table and optimizing_list */
  63. static DEFINE_MUTEX(kprobe_mutex);
  64. static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
  65. static struct {
  66. raw_spinlock_t lock ____cacheline_aligned_in_smp;
  67. } kretprobe_table_locks[KPROBE_TABLE_SIZE];
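  /*
   * Active kretprobe instances live in kretprobe_inst_table, hashed by the
   * probed task; each entry of kretprobe_table_locks guards the bucket with
   * the same index, so return-probe handlers and task exit can work on
   * different buckets concurrently.
   */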
  68. kprobe_opcode_t * __weak kprobe_lookup_name(const char *name,
  69. unsigned int __unused)
  70. {
  71. return ((kprobe_opcode_t *)(kallsyms_lookup_name(name)));
  72. }
  73. static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
  74. {
  75. return &(kretprobe_table_locks[hash].lock);
  76. }
  77. /* Blacklist -- list of struct kprobe_blacklist_entry */
  78. static LIST_HEAD(kprobe_blacklist);
  79. #ifdef __ARCH_WANT_KPROBES_INSN_SLOT
  80. /*
  81. * kprobe->ainsn.insn points to the copy of the instruction to be
  82. * single-stepped. x86_64, POWER4 and above have no-exec support and
  83. * stepping on the instruction on a vmalloced/kmalloced/data page
  84. * is a recipe for disaster
  85. */
  86. struct kprobe_insn_page {
  87. struct list_head list;
  88. kprobe_opcode_t *insns; /* Page of instruction slots */
  89. struct kprobe_insn_cache *cache;
  90. int nused;
  91. int ngarbage;
  92. char slot_used[];
  93. };
  94. #define KPROBE_INSN_PAGE_SIZE(slots) \
  95. (offsetof(struct kprobe_insn_page, slot_used) + \
  96. (sizeof(char) * (slots)))
  97. static int slots_per_page(struct kprobe_insn_cache *c)
  98. {
  99. return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
  100. }
  101. enum kprobe_slot_state {
  102. SLOT_CLEAN = 0,
  103. SLOT_DIRTY = 1,
  104. SLOT_USED = 2,
  105. };
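  /*
   * Slot life cycle: a slot starts out SLOT_CLEAN, becomes SLOT_USED when
   * handed out by __get_insn_slot(), and is marked SLOT_DIRTY when freed
   * with the dirty flag set; dirty slots are swept back to SLOT_CLEAN by
   * collect_garbage_slots() once no CPU can still be executing from them.
   */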
  106. void __weak *alloc_insn_page(void)
  107. {
  108. return module_alloc(PAGE_SIZE);
  109. }
  110. void __weak free_insn_page(void *page)
  111. {
  112. module_memfree(page);
  113. }
  114. struct kprobe_insn_cache kprobe_insn_slots = {
  115. .mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex),
  116. .alloc = alloc_insn_page,
  117. .free = free_insn_page,
  118. .pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
  119. .insn_size = MAX_INSN_SIZE,
  120. .nr_garbage = 0,
  121. };
  122. static int collect_garbage_slots(struct kprobe_insn_cache *c);
  123. /**
  124. * __get_insn_slot() - Find a slot on an executable page for an instruction.
  125. * We allocate an executable page if there's no room on existing ones.
  126. */
  127. kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
  128. {
  129. struct kprobe_insn_page *kip;
  130. kprobe_opcode_t *slot = NULL;
  131. /* Since the slot array is not protected by rcu, we need a mutex */
  132. mutex_lock(&c->mutex);
  133. retry:
  134. rcu_read_lock();
  135. list_for_each_entry_rcu(kip, &c->pages, list) {
  136. if (kip->nused < slots_per_page(c)) {
  137. int i;
  138. for (i = 0; i < slots_per_page(c); i++) {
  139. if (kip->slot_used[i] == SLOT_CLEAN) {
  140. kip->slot_used[i] = SLOT_USED;
  141. kip->nused++;
  142. slot = kip->insns + (i * c->insn_size);
  143. rcu_read_unlock();
  144. goto out;
  145. }
  146. }
  147. /* kip->nused is broken. Fix it. */
  148. kip->nused = slots_per_page(c);
  149. WARN_ON(1);
  150. }
  151. }
  152. rcu_read_unlock();
  153. /* If there are any garbage slots, collect them and try again. */
  154. if (c->nr_garbage && collect_garbage_slots(c) == 0)
  155. goto retry;
  156. /* All out of space. Need to allocate a new page. */
  157. kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
  158. if (!kip)
  159. goto out;
  160. /*
  161. * Use module_alloc so this page is within +/- 2GB of where the
  162. * kernel image and loaded module images reside. This is required
  163. * so x86_64 can correctly handle the %rip-relative fixups.
  164. */
  165. kip->insns = c->alloc();
  166. if (!kip->insns) {
  167. kfree(kip);
  168. goto out;
  169. }
  170. INIT_LIST_HEAD(&kip->list);
  171. memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
  172. kip->slot_used[0] = SLOT_USED;
  173. kip->nused = 1;
  174. kip->ngarbage = 0;
  175. kip->cache = c;
  176. list_add_rcu(&kip->list, &c->pages);
  177. slot = kip->insns;
  178. out:
  179. mutex_unlock(&c->mutex);
  180. return slot;
  181. }
  182. /* Return 1 if all garbage slots on this page were collected, otherwise 0. */
  183. static int collect_one_slot(struct kprobe_insn_page *kip, int idx)
  184. {
  185. kip->slot_used[idx] = SLOT_CLEAN;
  186. kip->nused--;
  187. if (kip->nused == 0) {
  188. /*
  189. * Page is no longer in use. Free it unless
  190. * it's the last one. We keep the last one
  191. * so as not to have to set it up again the
  192. * next time somebody inserts a probe.
  193. */
  194. if (!list_is_singular(&kip->list)) {
  195. list_del_rcu(&kip->list);
  196. synchronize_rcu();
  197. kip->cache->free(kip->insns);
  198. kfree(kip);
  199. }
  200. return 1;
  201. }
  202. return 0;
  203. }
  204. static int collect_garbage_slots(struct kprobe_insn_cache *c)
  205. {
  206. struct kprobe_insn_page *kip, *next;
  207. /* Ensure no one is still running on the garbage slots */
  208. synchronize_sched();
  209. list_for_each_entry_safe(kip, next, &c->pages, list) {
  210. int i;
  211. if (kip->ngarbage == 0)
  212. continue;
  213. kip->ngarbage = 0; /* we will collect all garbage slots */
  214. for (i = 0; i < slots_per_page(c); i++) {
  215. if (kip->slot_used[i] == SLOT_DIRTY && collect_one_slot(kip, i))
  216. break;
  217. }
  218. }
  219. c->nr_garbage = 0;
  220. return 0;
  221. }
  222. void __free_insn_slot(struct kprobe_insn_cache *c,
  223. kprobe_opcode_t *slot, int dirty)
  224. {
  225. struct kprobe_insn_page *kip;
  226. long idx;
  227. mutex_lock(&c->mutex);
  228. rcu_read_lock();
  229. list_for_each_entry_rcu(kip, &c->pages, list) {
  230. idx = ((long)slot - (long)kip->insns) /
  231. (c->insn_size * sizeof(kprobe_opcode_t));
  232. if (idx >= 0 && idx < slots_per_page(c))
  233. goto out;
  234. }
  235. /* Could not find this slot. */
  236. WARN_ON(1);
  237. kip = NULL;
  238. out:
  239. rcu_read_unlock();
  240. /* Mark and sweep: this may sleep */
  241. if (kip) {
  242. /* Check double free */
  243. WARN_ON(kip->slot_used[idx] != SLOT_USED);
  244. if (dirty) {
  245. kip->slot_used[idx] = SLOT_DIRTY;
  246. kip->ngarbage++;
  247. if (++c->nr_garbage > slots_per_page(c))
  248. collect_garbage_slots(c);
  249. } else {
  250. collect_one_slot(kip, idx);
  251. }
  252. }
  253. mutex_unlock(&c->mutex);
  254. }
  255. /*
  256. * Check whether the given address is on a page of kprobe instruction slots.
  257. * This is used for checking whether an address found on the stack
  258. * is in a text area or not.
  259. */
  260. bool __is_insn_slot_addr(struct kprobe_insn_cache *c, unsigned long addr)
  261. {
  262. struct kprobe_insn_page *kip;
  263. bool ret = false;
  264. rcu_read_lock();
  265. list_for_each_entry_rcu(kip, &c->pages, list) {
  266. if (addr >= (unsigned long)kip->insns &&
  267. addr < (unsigned long)kip->insns + PAGE_SIZE) {
  268. ret = true;
  269. break;
  270. }
  271. }
  272. rcu_read_unlock();
  273. return ret;
  274. }
  275. #ifdef CONFIG_OPTPROBES
  276. /* For optimized_kprobe buffer */
  277. struct kprobe_insn_cache kprobe_optinsn_slots = {
  278. .mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex),
  279. .alloc = alloc_insn_page,
  280. .free = free_insn_page,
  281. .pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
  282. /* .insn_size is initialized later */
  283. .nr_garbage = 0,
  284. };
  285. #endif
  286. #endif
  287. /* We have preemption disabled, so it is safe to use __ versions */
  288. static inline void set_kprobe_instance(struct kprobe *kp)
  289. {
  290. __this_cpu_write(kprobe_instance, kp);
  291. }
  292. static inline void reset_kprobe_instance(void)
  293. {
  294. __this_cpu_write(kprobe_instance, NULL);
  295. }
  296. /*
  297. * This routine is called either:
  298. * - under the kprobe_mutex - during kprobe_[un]register()
  299. * OR
  300. * - with preemption disabled - from arch/xxx/kernel/kprobes.c
  301. */
  302. struct kprobe *get_kprobe(void *addr)
  303. {
  304. struct hlist_head *head;
  305. struct kprobe *p;
  306. head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
  307. hlist_for_each_entry_rcu(p, head, hlist) {
  308. if (p->addr == addr)
  309. return p;
  310. }
  311. return NULL;
  312. }
  313. NOKPROBE_SYMBOL(get_kprobe);
  314. static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);
  315. /* Return true if the kprobe is an aggregator */
  316. static inline int kprobe_aggrprobe(struct kprobe *p)
  317. {
  318. return p->pre_handler == aggr_pre_handler;
  319. }
  320. /* Return true(!0) if the kprobe is unused */
  321. static inline int kprobe_unused(struct kprobe *p)
  322. {
  323. return kprobe_aggrprobe(p) && kprobe_disabled(p) &&
  324. list_empty(&p->list);
  325. }
  326. /*
  327. * Keep all fields in the kprobe consistent
  328. */
  329. static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p)
  330. {
  331. memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t));
  332. memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn));
  333. }
  334. #ifdef CONFIG_OPTPROBES
  335. /* NOTE: change this value only with kprobe_mutex held */
  336. static bool kprobes_allow_optimization;
  337. /*
  338. * Call all pre_handlers on the list, but ignore their return values.
  339. * This must be called from the arch-dependent optimized caller.
  340. */
  341. void opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
  342. {
  343. struct kprobe *kp;
  344. list_for_each_entry_rcu(kp, &p->list, list) {
  345. if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
  346. set_kprobe_instance(kp);
  347. kp->pre_handler(kp, regs);
  348. }
  349. reset_kprobe_instance();
  350. }
  351. }
  352. NOKPROBE_SYMBOL(opt_pre_handler);
  353. /* Free optimized instructions and optimized_kprobe */
  354. static void free_aggr_kprobe(struct kprobe *p)
  355. {
  356. struct optimized_kprobe *op;
  357. op = container_of(p, struct optimized_kprobe, kp);
  358. arch_remove_optimized_kprobe(op);
  359. arch_remove_kprobe(p);
  360. kfree(op);
  361. }
  362. /* Return true(!0) if the kprobe is ready for optimization. */
  363. static inline int kprobe_optready(struct kprobe *p)
  364. {
  365. struct optimized_kprobe *op;
  366. if (kprobe_aggrprobe(p)) {
  367. op = container_of(p, struct optimized_kprobe, kp);
  368. return arch_prepared_optinsn(&op->optinsn);
  369. }
  370. return 0;
  371. }
  372. /* Return true(!0) if the kprobe is disarmed. Note: p must be on hash list */
  373. static inline int kprobe_disarmed(struct kprobe *p)
  374. {
  375. struct optimized_kprobe *op;
  376. /* If the kprobe is not an aggr/opt probe, just return whether it is disabled */
  377. if (!kprobe_aggrprobe(p))
  378. return kprobe_disabled(p);
  379. op = container_of(p, struct optimized_kprobe, kp);
  380. return kprobe_disabled(p) && list_empty(&op->list);
  381. }
  382. /* Return true(!0) if the probe is queued on (un)optimizing lists */
  383. static int kprobe_queued(struct kprobe *p)
  384. {
  385. struct optimized_kprobe *op;
  386. if (kprobe_aggrprobe(p)) {
  387. op = container_of(p, struct optimized_kprobe, kp);
  388. if (!list_empty(&op->list))
  389. return 1;
  390. }
  391. return 0;
  392. }
  393. /*
  394. * Return an optimized kprobe whose optimizing code replaces
  395. * instructions including addr (excluding the breakpoint itself).
  396. */
  397. static struct kprobe *get_optimized_kprobe(unsigned long addr)
  398. {
  399. int i;
  400. struct kprobe *p = NULL;
  401. struct optimized_kprobe *op;
  402. /* Don't check i == 0, since that is a breakpoint case. */
  403. for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++)
  404. p = get_kprobe((void *)(addr - i));
  405. if (p && kprobe_optready(p)) {
  406. op = container_of(p, struct optimized_kprobe, kp);
  407. if (arch_within_optimized_kprobe(op, addr))
  408. return p;
  409. }
  410. return NULL;
  411. }
  412. /* Optimization staging list, protected by kprobe_mutex */
  413. static LIST_HEAD(optimizing_list);
  414. static LIST_HEAD(unoptimizing_list);
  415. static LIST_HEAD(freeing_list);
  416. static void kprobe_optimizer(struct work_struct *work);
  417. static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
  418. #define OPTIMIZE_DELAY 5
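  /*
   * (Un)optimization requests are queued on the lists above and handled in
   * batches by kprobe_optimizer(), a delayed work item kicked OPTIMIZE_DELAY
   * jiffies after a request arrives, so bursts of (un)registrations share a
   * single pass over the kernel text.
   */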
  419. /*
  420. * Optimize (replace a breakpoint with a jump) kprobes listed on
  421. * optimizing_list.
  422. */
  423. static void do_optimize_kprobes(void)
  424. {
  425. /*
  426. * The optimization/unoptimization refers online_cpus via
  427. * stop_machine() and cpu-hotplug modifies online_cpus.
  428. * At the same time, text_mutex is held both in cpu-hotplug and here.
  429. * This combination can cause a deadlock (cpu-hotplug tries to lock
  430. * text_mutex but stop_machine cannot be done because online_cpus
  431. * has been changed).
  432. * To avoid this deadlock, caller must have locked cpu hotplug
  433. * for preventing cpu-hotplug outside of text_mutex locking.
  434. */
  435. lockdep_assert_cpus_held();
  436. /* Optimization is never done while kprobes are all disarmed */
  437. if (kprobes_all_disarmed || !kprobes_allow_optimization ||
  438. list_empty(&optimizing_list))
  439. return;
  440. mutex_lock(&text_mutex);
  441. arch_optimize_kprobes(&optimizing_list);
  442. mutex_unlock(&text_mutex);
  443. }
  444. /*
  445. * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
  446. * if need) kprobes listed on unoptimizing_list.
  447. */
  448. static void do_unoptimize_kprobes(void)
  449. {
  450. struct optimized_kprobe *op, *tmp;
  451. /* See comment in do_optimize_kprobes() */
  452. lockdep_assert_cpus_held();
  453. /* Unoptimization must be done at any time (even while disarmed) */
  454. if (list_empty(&unoptimizing_list))
  455. return;
  456. mutex_lock(&text_mutex);
  457. arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
  458. /* Loop over freeing_list for disarming */
  459. list_for_each_entry_safe(op, tmp, &freeing_list, list) {
  460. /* Disarm probes if marked disabled */
  461. if (kprobe_disabled(&op->kp))
  462. arch_disarm_kprobe(&op->kp);
  463. if (kprobe_unused(&op->kp)) {
  464. /*
  465. * Remove unused probes from hash list. After waiting
  466. * for synchronization, these probes are reclaimed.
  467. * (reclaiming is done by do_free_cleaned_kprobes.)
  468. */
  469. hlist_del_rcu(&op->kp.hlist);
  470. } else
  471. list_del_init(&op->list);
  472. }
  473. mutex_unlock(&text_mutex);
  474. }
  475. /* Reclaim all kprobes on the freeing_list */
  476. static void do_free_cleaned_kprobes(void)
  477. {
  478. struct optimized_kprobe *op, *tmp;
  479. list_for_each_entry_safe(op, tmp, &freeing_list, list) {
  480. BUG_ON(!kprobe_unused(&op->kp));
  481. list_del_init(&op->list);
  482. free_aggr_kprobe(&op->kp);
  483. }
  484. }
  485. /* Start optimizer after OPTIMIZE_DELAY passed */
  486. static void kick_kprobe_optimizer(void)
  487. {
  488. schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
  489. }
  490. /* Kprobe jump optimizer */
  491. static void kprobe_optimizer(struct work_struct *work)
  492. {
  493. mutex_lock(&kprobe_mutex);
  494. cpus_read_lock();
  495. /* Lock modules while optimizing kprobes */
  496. mutex_lock(&module_mutex);
  497. /*
  498. * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
  499. * kprobes before waiting for the quiescence period.
  500. */
  501. do_unoptimize_kprobes();
  502. /*
  503. * Step 2: Wait for a quiescence period to ensure all potentially
  504. * preempted tasks have been scheduled normally. Because an optprobe
  505. * may modify multiple instructions, there is a chance that a task was
  506. * preempted in the middle of them. In that case, such a task could return
  507. * to the 2nd-Nth byte of the jump instruction. This wait avoids that.
  508. * Note that on a non-preemptive kernel, this is transparently converted
  509. * to synchronize_sched() to wait for all interrupts to have completed.
  510. */
  511. synchronize_rcu_tasks();
  512. /* Step 3: Optimize kprobes after the quiescence period */
  513. do_optimize_kprobes();
  514. /* Step 4: Free cleaned kprobes after the quiescence period */
  515. do_free_cleaned_kprobes();
  516. mutex_unlock(&module_mutex);
  517. cpus_read_unlock();
  518. mutex_unlock(&kprobe_mutex);
  519. /* Step 5: Kick optimizer again if needed */
  520. if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
  521. kick_kprobe_optimizer();
  522. }
  523. /* Wait for optimization and unoptimization to complete */
  524. void wait_for_kprobe_optimizer(void)
  525. {
  526. mutex_lock(&kprobe_mutex);
  527. while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
  528. mutex_unlock(&kprobe_mutex);
  529. /* this will also make optimizing_work execute immediately */
  530. flush_delayed_work(&optimizing_work);
  531. /* @optimizing_work might not have been queued yet, relax */
  532. cpu_relax();
  533. mutex_lock(&kprobe_mutex);
  534. }
  535. mutex_unlock(&kprobe_mutex);
  536. }
  537. /* Optimize kprobe if p is ready to be optimized */
  538. static void optimize_kprobe(struct kprobe *p)
  539. {
  540. struct optimized_kprobe *op;
  541. /* Check if the kprobe is disabled or not ready for optimization. */
  542. if (!kprobe_optready(p) || !kprobes_allow_optimization ||
  543. (kprobe_disabled(p) || kprobes_all_disarmed))
  544. return;
  545. /* kprobes with a post_handler cannot be optimized */
  546. if (p->post_handler)
  547. return;
  548. op = container_of(p, struct optimized_kprobe, kp);
  549. /* Check that there are no other kprobes within the instructions to be optimized */
  550. if (arch_check_optimized_kprobe(op) < 0)
  551. return;
  552. /* Check if it is already optimized. */
  553. if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
  554. return;
  555. op->kp.flags |= KPROBE_FLAG_OPTIMIZED;
  556. if (!list_empty(&op->list))
  557. /* This probe is being unoptimized. Just dequeue it */
  558. list_del_init(&op->list);
  559. else {
  560. list_add(&op->list, &optimizing_list);
  561. kick_kprobe_optimizer();
  562. }
  563. }
  564. /* Short cut to direct unoptimizing */
  565. static void force_unoptimize_kprobe(struct optimized_kprobe *op)
  566. {
  567. lockdep_assert_cpus_held();
  568. arch_unoptimize_kprobe(op);
  569. if (kprobe_disabled(&op->kp))
  570. arch_disarm_kprobe(&op->kp);
  571. }
  572. /* Unoptimize a kprobe if p is optimized */
  573. static void unoptimize_kprobe(struct kprobe *p, bool force)
  574. {
  575. struct optimized_kprobe *op;
  576. if (!kprobe_aggrprobe(p) || kprobe_disarmed(p))
  577. return; /* This is not an optprobe nor optimized */
  578. op = container_of(p, struct optimized_kprobe, kp);
  579. if (!kprobe_optimized(p)) {
  580. /* Unoptimized or unoptimizing case */
  581. if (force && !list_empty(&op->list)) {
  582. /*
  583. * Only if this kprobe is being unoptimized and force is set,
  584. * forcibly unoptimize it. (No need to unoptimize an
  585. * already-unoptimized kprobe again :)
  586. */
  587. list_del_init(&op->list);
  588. force_unoptimize_kprobe(op);
  589. }
  590. return;
  591. }
  592. op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
  593. if (!list_empty(&op->list)) {
  594. /* Dequeue from the optimization queue */
  595. list_del_init(&op->list);
  596. return;
  597. }
  598. /* Optimized kprobe case */
  599. if (force)
  600. /* Forcibly update the code: this is a special case */
  601. force_unoptimize_kprobe(op);
  602. else {
  603. list_add(&op->list, &unoptimizing_list);
  604. kick_kprobe_optimizer();
  605. }
  606. }
  607. /* Cancel unoptimizing so the kprobe can be reused */
  608. static void reuse_unused_kprobe(struct kprobe *ap)
  609. {
  610. struct optimized_kprobe *op;
  611. BUG_ON(!kprobe_unused(ap));
  612. /*
  613. * An unused kprobe MUST be in the middle of delayed unoptimizing (which
  614. * means a relative jump is still installed) and disabled.
  615. */
  616. op = container_of(ap, struct optimized_kprobe, kp);
  617. WARN_ON_ONCE(list_empty(&op->list));
  618. /* Enable the probe again */
  619. ap->flags &= ~KPROBE_FLAG_DISABLED;
  620. /* Optimize it again (remove from op->list) */
  621. BUG_ON(!kprobe_optready(ap));
  622. optimize_kprobe(ap);
  623. }
  624. /* Remove optimized instructions */
  625. static void kill_optimized_kprobe(struct kprobe *p)
  626. {
  627. struct optimized_kprobe *op;
  628. op = container_of(p, struct optimized_kprobe, kp);
  629. if (!list_empty(&op->list))
  630. /* Dequeue from the (un)optimization queue */
  631. list_del_init(&op->list);
  632. op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
  633. if (kprobe_unused(p)) {
  634. /* Enqueue if it is unused */
  635. list_add(&op->list, &freeing_list);
  636. /*
  637. * Remove unused probes from the hash list. After waiting
  638. * for synchronization, this probe is reclaimed.
  639. * (reclaiming is done by do_free_cleaned_kprobes().)
  640. */
  641. hlist_del_rcu(&op->kp.hlist);
  642. }
  643. /* Don't touch the code, because it is already freed. */
  644. arch_remove_optimized_kprobe(op);
  645. }
  646. static inline
  647. void __prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
  648. {
  649. if (!kprobe_ftrace(p))
  650. arch_prepare_optimized_kprobe(op, p);
  651. }
  652. /* Try to prepare optimized instructions */
  653. static void prepare_optimized_kprobe(struct kprobe *p)
  654. {
  655. struct optimized_kprobe *op;
  656. op = container_of(p, struct optimized_kprobe, kp);
  657. __prepare_optimized_kprobe(op, p);
  658. }
  659. /* Allocate new optimized_kprobe and try to prepare optimized instructions */
  660. static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
  661. {
  662. struct optimized_kprobe *op;
  663. op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL);
  664. if (!op)
  665. return NULL;
  666. INIT_LIST_HEAD(&op->list);
  667. op->kp.addr = p->addr;
  668. __prepare_optimized_kprobe(op, p);
  669. return &op->kp;
  670. }
  671. static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);
  672. /*
  673. * Prepare an optimized_kprobe and optimize it
  674. * NOTE: p must be a normal registered kprobe
  675. */
  676. static void try_to_optimize_kprobe(struct kprobe *p)
  677. {
  678. struct kprobe *ap;
  679. struct optimized_kprobe *op;
  680. /* Impossible to optimize ftrace-based kprobe */
  681. if (kprobe_ftrace(p))
  682. return;
  683. /* For preparing optimization, jump_label_text_reserved() is called */
  684. cpus_read_lock();
  685. jump_label_lock();
  686. mutex_lock(&text_mutex);
  687. ap = alloc_aggr_kprobe(p);
  688. if (!ap)
  689. goto out;
  690. op = container_of(ap, struct optimized_kprobe, kp);
  691. if (!arch_prepared_optinsn(&op->optinsn)) {
  692. /* If preparing the optimized instructions failed, fall back to a regular kprobe */
  693. arch_remove_optimized_kprobe(op);
  694. kfree(op);
  695. goto out;
  696. }
  697. init_aggr_kprobe(ap, p);
  698. optimize_kprobe(ap); /* This just kicks optimizer thread */
  699. out:
  700. mutex_unlock(&text_mutex);
  701. jump_label_unlock();
  702. cpus_read_unlock();
  703. }
  704. #ifdef CONFIG_SYSCTL
  705. static void optimize_all_kprobes(void)
  706. {
  707. struct hlist_head *head;
  708. struct kprobe *p;
  709. unsigned int i;
  710. mutex_lock(&kprobe_mutex);
  711. /* If optimization is already allowed, just return */
  712. if (kprobes_allow_optimization)
  713. goto out;
  714. cpus_read_lock();
  715. kprobes_allow_optimization = true;
  716. for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
  717. head = &kprobe_table[i];
  718. hlist_for_each_entry_rcu(p, head, hlist)
  719. if (!kprobe_disabled(p))
  720. optimize_kprobe(p);
  721. }
  722. cpus_read_unlock();
  723. printk(KERN_INFO "Kprobes globally optimized\n");
  724. out:
  725. mutex_unlock(&kprobe_mutex);
  726. }
  727. static void unoptimize_all_kprobes(void)
  728. {
  729. struct hlist_head *head;
  730. struct kprobe *p;
  731. unsigned int i;
  732. mutex_lock(&kprobe_mutex);
  733. /* If optimization is already prohibited, just return */
  734. if (!kprobes_allow_optimization) {
  735. mutex_unlock(&kprobe_mutex);
  736. return;
  737. }
  738. cpus_read_lock();
  739. kprobes_allow_optimization = false;
  740. for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
  741. head = &kprobe_table[i];
  742. hlist_for_each_entry_rcu(p, head, hlist) {
  743. if (!kprobe_disabled(p))
  744. unoptimize_kprobe(p, false);
  745. }
  746. }
  747. cpus_read_unlock();
  748. mutex_unlock(&kprobe_mutex);
  749. /* Wait for unoptimizing completion */
  750. wait_for_kprobe_optimizer();
  751. printk(KERN_INFO "Kprobes globally unoptimized\n");
  752. }
  753. static DEFINE_MUTEX(kprobe_sysctl_mutex);
  754. int sysctl_kprobes_optimization;
  755. int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
  756. void __user *buffer, size_t *length,
  757. loff_t *ppos)
  758. {
  759. int ret;
  760. mutex_lock(&kprobe_sysctl_mutex);
  761. sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
  762. ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
  763. if (sysctl_kprobes_optimization)
  764. optimize_all_kprobes();
  765. else
  766. unoptimize_all_kprobes();
  767. mutex_unlock(&kprobe_sysctl_mutex);
  768. return ret;
  769. }
  770. #endif /* CONFIG_SYSCTL */
  771. /* Put a breakpoint for a probe. Must be called with text_mutex locked */
  772. static void __arm_kprobe(struct kprobe *p)
  773. {
  774. struct kprobe *_p;
  775. /* Check collision with other optimized kprobes */
  776. _p = get_optimized_kprobe((unsigned long)p->addr);
  777. if (unlikely(_p))
  778. /* Fallback to unoptimized kprobe */
  779. unoptimize_kprobe(_p, true);
  780. arch_arm_kprobe(p);
  781. optimize_kprobe(p); /* Try to optimize (add kprobe to a list) */
  782. }
  783. /* Remove the breakpoint of a probe. Must be called with text_mutex locked */
  784. static void __disarm_kprobe(struct kprobe *p, bool reopt)
  785. {
  786. struct kprobe *_p;
  787. /* Try to unoptimize */
  788. unoptimize_kprobe(p, kprobes_all_disarmed);
  789. if (!kprobe_queued(p)) {
  790. arch_disarm_kprobe(p);
  791. /* If another kprobe was blocked, optimize it. */
  792. _p = get_optimized_kprobe((unsigned long)p->addr);
  793. if (unlikely(_p) && reopt)
  794. optimize_kprobe(_p);
  795. }
  796. /* TODO: reoptimize others after unoptimizing this probe */
  797. }
  798. #else /* !CONFIG_OPTPROBES */
  799. #define optimize_kprobe(p) do {} while (0)
  800. #define unoptimize_kprobe(p, f) do {} while (0)
  801. #define kill_optimized_kprobe(p) do {} while (0)
  802. #define prepare_optimized_kprobe(p) do {} while (0)
  803. #define try_to_optimize_kprobe(p) do {} while (0)
  804. #define __arm_kprobe(p) arch_arm_kprobe(p)
  805. #define __disarm_kprobe(p, o) arch_disarm_kprobe(p)
  806. #define kprobe_disarmed(p) kprobe_disabled(p)
  807. #define wait_for_kprobe_optimizer() do {} while (0)
  808. /* Without optimization support, there should be no unused kprobes to reuse */
  809. static void reuse_unused_kprobe(struct kprobe *ap)
  810. {
  811. printk(KERN_ERR "Error: There should be no unused kprobe here.\n");
  812. BUG_ON(kprobe_unused(ap));
  813. }
  814. static void free_aggr_kprobe(struct kprobe *p)
  815. {
  816. arch_remove_kprobe(p);
  817. kfree(p);
  818. }
  819. static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
  820. {
  821. return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
  822. }
  823. #endif /* CONFIG_OPTPROBES */
  824. #ifdef CONFIG_KPROBES_ON_FTRACE
  825. static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
  826. .func = kprobe_ftrace_handler,
  827. .flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
  828. };
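  /*
   * For probes placed on an ftrace location, the software breakpoint is
   * replaced by this ftrace_ops: SAVE_REGS makes ftrace pass a full pt_regs
   * to kprobe_ftrace_handler(), and IPMODIFY is set because kprobe handlers
   * are allowed to modify regs->ip.
   */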
  829. static int kprobe_ftrace_enabled;
  830. /* Must ensure p->addr is really on ftrace */
  831. static int prepare_kprobe(struct kprobe *p)
  832. {
  833. if (!kprobe_ftrace(p))
  834. return arch_prepare_kprobe(p);
  835. return arch_prepare_kprobe_ftrace(p);
  836. }
  837. /* Caller must lock kprobe_mutex */
  838. static int arm_kprobe_ftrace(struct kprobe *p)
  839. {
  840. int ret = 0;
  841. ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
  842. (unsigned long)p->addr, 0, 0);
  843. if (ret) {
  844. pr_debug("Failed to arm kprobe-ftrace at %pS (%d)\n",
  845. p->addr, ret);
  846. return ret;
  847. }
  848. if (kprobe_ftrace_enabled == 0) {
  849. ret = register_ftrace_function(&kprobe_ftrace_ops);
  850. if (ret) {
  851. pr_debug("Failed to init kprobe-ftrace (%d)\n", ret);
  852. goto err_ftrace;
  853. }
  854. }
  855. kprobe_ftrace_enabled++;
  856. return ret;
  857. err_ftrace:
  858. /*
  859. * Note: Since kprobe_ftrace_ops has IPMODIFY set, and ftrace requires a
  860. * non-empty filter_hash for IPMODIFY ops, we're safe from an accidental
  861. * empty filter_hash which would undesirably trace all functions.
  862. */
  863. ftrace_set_filter_ip(&kprobe_ftrace_ops, (unsigned long)p->addr, 1, 0);
  864. return ret;
  865. }
  866. /* Caller must lock kprobe_mutex */
  867. static int disarm_kprobe_ftrace(struct kprobe *p)
  868. {
  869. int ret = 0;
  870. if (kprobe_ftrace_enabled == 1) {
  871. ret = unregister_ftrace_function(&kprobe_ftrace_ops);
  872. if (WARN(ret < 0, "Failed to unregister kprobe-ftrace (%d)\n", ret))
  873. return ret;
  874. }
  875. kprobe_ftrace_enabled--;
  876. ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
  877. (unsigned long)p->addr, 1, 0);
  878. WARN_ONCE(ret < 0, "Failed to disarm kprobe-ftrace at %pS (%d)\n",
  879. p->addr, ret);
  880. return ret;
  881. }
  882. #else /* !CONFIG_KPROBES_ON_FTRACE */
  883. #define prepare_kprobe(p) arch_prepare_kprobe(p)
  884. #define arm_kprobe_ftrace(p) (-ENODEV)
  885. #define disarm_kprobe_ftrace(p) (-ENODEV)
  886. #endif
  887. /* Arm a kprobe with text_mutex */
  888. static int arm_kprobe(struct kprobe *kp)
  889. {
  890. if (unlikely(kprobe_ftrace(kp)))
  891. return arm_kprobe_ftrace(kp);
  892. cpus_read_lock();
  893. mutex_lock(&text_mutex);
  894. __arm_kprobe(kp);
  895. mutex_unlock(&text_mutex);
  896. cpus_read_unlock();
  897. return 0;
  898. }
  899. /* Disarm a kprobe with text_mutex */
  900. static int disarm_kprobe(struct kprobe *kp, bool reopt)
  901. {
  902. if (unlikely(kprobe_ftrace(kp)))
  903. return disarm_kprobe_ftrace(kp);
  904. cpus_read_lock();
  905. mutex_lock(&text_mutex);
  906. __disarm_kprobe(kp, reopt);
  907. mutex_unlock(&text_mutex);
  908. cpus_read_unlock();
  909. return 0;
  910. }
  911. /*
  912. * Aggregate handlers for multiple kprobes support - these handlers
  913. * take care of invoking the individual kprobe handlers on p->list
  914. */
  915. static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
  916. {
  917. struct kprobe *kp;
  918. list_for_each_entry_rcu(kp, &p->list, list) {
  919. if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
  920. set_kprobe_instance(kp);
  921. if (kp->pre_handler(kp, regs))
  922. return 1;
  923. }
  924. reset_kprobe_instance();
  925. }
  926. return 0;
  927. }
  928. NOKPROBE_SYMBOL(aggr_pre_handler);
  929. static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
  930. unsigned long flags)
  931. {
  932. struct kprobe *kp;
  933. list_for_each_entry_rcu(kp, &p->list, list) {
  934. if (kp->post_handler && likely(!kprobe_disabled(kp))) {
  935. set_kprobe_instance(kp);
  936. kp->post_handler(kp, regs, flags);
  937. reset_kprobe_instance();
  938. }
  939. }
  940. }
  941. NOKPROBE_SYMBOL(aggr_post_handler);
  942. static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
  943. int trapnr)
  944. {
  945. struct kprobe *cur = __this_cpu_read(kprobe_instance);
  946. /*
  947. * if we faulted "during" the execution of a user specified
  948. * probe handler, invoke just that probe's fault handler
  949. */
  950. if (cur && cur->fault_handler) {
  951. if (cur->fault_handler(cur, regs, trapnr))
  952. return 1;
  953. }
  954. return 0;
  955. }
  956. NOKPROBE_SYMBOL(aggr_fault_handler);
  957. /* Walks the list and increments nmissed count for multiprobe case */
  958. void kprobes_inc_nmissed_count(struct kprobe *p)
  959. {
  960. struct kprobe *kp;
  961. if (!kprobe_aggrprobe(p)) {
  962. p->nmissed++;
  963. } else {
  964. list_for_each_entry_rcu(kp, &p->list, list)
  965. kp->nmissed++;
  966. }
  967. return;
  968. }
  969. NOKPROBE_SYMBOL(kprobes_inc_nmissed_count);
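  /*
   * Put a kretprobe_instance back on its kretprobe's free list once the
   * return has been handled; if the kretprobe is being unregistered
   * (ri->rp == NULL), park the instance on the caller-supplied list so the
   * caller can free it instead.
   */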
  970. void recycle_rp_inst(struct kretprobe_instance *ri,
  971. struct hlist_head *head)
  972. {
  973. struct kretprobe *rp = ri->rp;
  974. /* remove rp inst off the kretprobe_inst_table */
  975. hlist_del(&ri->hlist);
  976. INIT_HLIST_NODE(&ri->hlist);
  977. if (likely(rp)) {
  978. raw_spin_lock(&rp->lock);
  979. hlist_add_head(&ri->hlist, &rp->free_instances);
  980. raw_spin_unlock(&rp->lock);
  981. } else
  982. /* Unregistering */
  983. hlist_add_head(&ri->hlist, head);
  984. }
  985. NOKPROBE_SYMBOL(recycle_rp_inst);
  986. void kretprobe_hash_lock(struct task_struct *tsk,
  987. struct hlist_head **head, unsigned long *flags)
  988. __acquires(hlist_lock)
  989. {
  990. unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
  991. raw_spinlock_t *hlist_lock;
  992. *head = &kretprobe_inst_table[hash];
  993. hlist_lock = kretprobe_table_lock_ptr(hash);
  994. raw_spin_lock_irqsave(hlist_lock, *flags);
  995. }
  996. NOKPROBE_SYMBOL(kretprobe_hash_lock);
  997. static void kretprobe_table_lock(unsigned long hash,
  998. unsigned long *flags)
  999. __acquires(hlist_lock)
  1000. {
  1001. raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
  1002. raw_spin_lock_irqsave(hlist_lock, *flags);
  1003. }
  1004. NOKPROBE_SYMBOL(kretprobe_table_lock);
  1005. void kretprobe_hash_unlock(struct task_struct *tsk,
  1006. unsigned long *flags)
  1007. __releases(hlist_lock)
  1008. {
  1009. unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
  1010. raw_spinlock_t *hlist_lock;
  1011. hlist_lock = kretprobe_table_lock_ptr(hash);
  1012. raw_spin_unlock_irqrestore(hlist_lock, *flags);
  1013. }
  1014. NOKPROBE_SYMBOL(kretprobe_hash_unlock);
  1015. static void kretprobe_table_unlock(unsigned long hash,
  1016. unsigned long *flags)
  1017. __releases(hlist_lock)
  1018. {
  1019. raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
  1020. raw_spin_unlock_irqrestore(hlist_lock, *flags);
  1021. }
  1022. NOKPROBE_SYMBOL(kretprobe_table_unlock);
  1023. /*
  1024. * This function is called from finish_task_switch when task tk becomes dead,
  1025. * so that we can recycle any function-return probe instances associated
  1026. * with this task. These left over instances represent probed functions
  1027. * that have been called but will never return.
  1028. */
  1029. void kprobe_flush_task(struct task_struct *tk)
  1030. {
  1031. struct kretprobe_instance *ri;
  1032. struct hlist_head *head, empty_rp;
  1033. struct hlist_node *tmp;
  1034. unsigned long hash, flags = 0;
  1035. if (unlikely(!kprobes_initialized))
  1036. /* Early boot. kretprobe_table_locks not yet initialized. */
  1037. return;
  1038. INIT_HLIST_HEAD(&empty_rp);
  1039. hash = hash_ptr(tk, KPROBE_HASH_BITS);
  1040. head = &kretprobe_inst_table[hash];
  1041. kretprobe_table_lock(hash, &flags);
  1042. hlist_for_each_entry_safe(ri, tmp, head, hlist) {
  1043. if (ri->task == tk)
  1044. recycle_rp_inst(ri, &empty_rp);
  1045. }
  1046. kretprobe_table_unlock(hash, &flags);
  1047. hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
  1048. hlist_del(&ri->hlist);
  1049. kfree(ri);
  1050. }
  1051. }
  1052. NOKPROBE_SYMBOL(kprobe_flush_task);
  1053. static inline void free_rp_inst(struct kretprobe *rp)
  1054. {
  1055. struct kretprobe_instance *ri;
  1056. struct hlist_node *next;
  1057. hlist_for_each_entry_safe(ri, next, &rp->free_instances, hlist) {
  1058. hlist_del(&ri->hlist);
  1059. kfree(ri);
  1060. }
  1061. }
  1062. static void cleanup_rp_inst(struct kretprobe *rp)
  1063. {
  1064. unsigned long flags, hash;
  1065. struct kretprobe_instance *ri;
  1066. struct hlist_node *next;
  1067. struct hlist_head *head;
  1068. /* No race here */
  1069. for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
  1070. kretprobe_table_lock(hash, &flags);
  1071. head = &kretprobe_inst_table[hash];
  1072. hlist_for_each_entry_safe(ri, next, head, hlist) {
  1073. if (ri->rp == rp)
  1074. ri->rp = NULL;
  1075. }
  1076. kretprobe_table_unlock(hash, &flags);
  1077. }
  1078. free_rp_inst(rp);
  1079. }
  1080. NOKPROBE_SYMBOL(cleanup_rp_inst);
  1081. /* Add the new probe to ap->list */
  1082. static int add_new_kprobe(struct kprobe *ap, struct kprobe *p)
  1083. {
  1084. BUG_ON(kprobe_gone(ap) || kprobe_gone(p));
  1085. if (p->post_handler)
  1086. unoptimize_kprobe(ap, true); /* Fall back to normal kprobe */
  1087. list_add_rcu(&p->list, &ap->list);
  1088. if (p->post_handler && !ap->post_handler)
  1089. ap->post_handler = aggr_post_handler;
  1090. return 0;
  1091. }
  1092. /*
  1093. * Fill in the required fields of the "manager kprobe". Replace the
  1094. * earlier kprobe in the hlist with the manager kprobe
  1095. */
  1096. static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
  1097. {
  1098. /* Copy p's insn slot to ap */
  1099. copy_kprobe(p, ap);
  1100. flush_insn_slot(ap);
  1101. ap->addr = p->addr;
  1102. ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
  1103. ap->pre_handler = aggr_pre_handler;
  1104. ap->fault_handler = aggr_fault_handler;
  1105. /* We don't care about a kprobe which has gone. */
  1106. if (p->post_handler && !kprobe_gone(p))
  1107. ap->post_handler = aggr_post_handler;
  1108. INIT_LIST_HEAD(&ap->list);
  1109. INIT_HLIST_NODE(&ap->hlist);
  1110. list_add_rcu(&p->list, &ap->list);
  1111. hlist_replace_rcu(&p->hlist, &ap->hlist);
  1112. }
  1113. /*
  1114. * This is the second or subsequent kprobe at the address - handle
  1115. * the intricacies
  1116. */
  1117. static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
  1118. {
  1119. int ret = 0;
  1120. struct kprobe *ap = orig_p;
  1121. cpus_read_lock();
  1122. /* For preparing optimization, jump_label_text_reserved() is called */
  1123. jump_label_lock();
  1124. mutex_lock(&text_mutex);
  1125. if (!kprobe_aggrprobe(orig_p)) {
  1126. /* If orig_p is not an aggr_kprobe, create new aggr_kprobe. */
  1127. ap = alloc_aggr_kprobe(orig_p);
  1128. if (!ap) {
  1129. ret = -ENOMEM;
  1130. goto out;
  1131. }
  1132. init_aggr_kprobe(ap, orig_p);
  1133. } else if (kprobe_unused(ap))
  1134. /* This probe is going to die. Rescue it */
  1135. reuse_unused_kprobe(ap);
  1136. if (kprobe_gone(ap)) {
  1137. /*
  1138. * We are attempting to insert a new probe at the same location
  1139. * as a probe whose module vaddr area has already been
  1140. * freed. So, the instruction slot has already been
  1141. * released. We need a new slot for the new probe.
  1142. */
  1143. ret = arch_prepare_kprobe(ap);
  1144. if (ret)
  1145. /*
  1146. * Even if we fail to allocate a new slot, we don't need to
  1147. * free the aggr_kprobe. It will be used next time, or
  1148. * freed by unregister_kprobe().
  1149. */
  1150. goto out;
  1151. /* Prepare optimized instructions if possible. */
  1152. prepare_optimized_kprobe(ap);
  1153. /*
  1154. * Clear gone flag to prevent allocating new slot again, and
  1155. * set disabled flag because it is not armed yet.
  1156. */
  1157. ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
  1158. | KPROBE_FLAG_DISABLED;
  1159. }
  1160. /* Copy ap's insn slot to p */
  1161. copy_kprobe(ap, p);
  1162. ret = add_new_kprobe(ap, p);
  1163. out:
  1164. mutex_unlock(&text_mutex);
  1165. jump_label_unlock();
  1166. cpus_read_unlock();
  1167. if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
  1168. ap->flags &= ~KPROBE_FLAG_DISABLED;
  1169. if (!kprobes_all_disarmed) {
  1170. /* Arm the breakpoint again. */
  1171. ret = arm_kprobe(ap);
  1172. if (ret) {
  1173. ap->flags |= KPROBE_FLAG_DISABLED;
  1174. list_del_rcu(&p->list);
  1175. synchronize_sched();
  1176. }
  1177. }
  1178. }
  1179. return ret;
  1180. }
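  /*
   * Once two or more kprobes share an address, the breakpoint is owned by
   * such an "aggregator" kprobe whose aggr_* handlers walk ap->list and call
   * each user kprobe in turn; later probes at that address are simply added
   * to or removed from that list, usually without re-patching the text.
   */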
  1181. bool __weak arch_within_kprobe_blacklist(unsigned long addr)
  1182. {
  1183. /* The __kprobes marked functions and entry code must not be probed */
  1184. return addr >= (unsigned long)__kprobes_text_start &&
  1185. addr < (unsigned long)__kprobes_text_end;
  1186. }
  1187. bool within_kprobe_blacklist(unsigned long addr)
  1188. {
  1189. struct kprobe_blacklist_entry *ent;
  1190. if (arch_within_kprobe_blacklist(addr))
  1191. return true;
  1192. /*
  1193. * If there exists a kprobe_blacklist, verify and
  1194. * fail any probe registration in the prohibited area
  1195. */
  1196. list_for_each_entry(ent, &kprobe_blacklist, list) {
  1197. if (addr >= ent->start_addr && addr < ent->end_addr)
  1198. return true;
  1199. }
  1200. return false;
  1201. }
  1202. /*
  1203. * If we have a symbol_name argument, look it up and add the offset field
  1204. * to it. This way, we can specify a relative address to a symbol.
  1205. * This returns encoded errors if it fails to look up the symbol or if an
  1206. * invalid combination of parameters is given.
  1207. */
  1208. static kprobe_opcode_t *_kprobe_addr(kprobe_opcode_t *addr,
  1209. const char *symbol_name, unsigned int offset)
  1210. {
  1211. if ((symbol_name && addr) || (!symbol_name && !addr))
  1212. goto invalid;
  1213. if (symbol_name) {
  1214. addr = kprobe_lookup_name(symbol_name, offset);
  1215. if (!addr)
  1216. return ERR_PTR(-ENOENT);
  1217. }
  1218. addr = (kprobe_opcode_t *)(((char *)addr) + offset);
  1219. if (addr)
  1220. return addr;
  1221. invalid:
  1222. return ERR_PTR(-EINVAL);
  1223. }
  1224. static kprobe_opcode_t *kprobe_addr(struct kprobe *p)
  1225. {
  1226. return _kprobe_addr(p->addr, p->symbol_name, p->offset);
  1227. }
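  /*
   * A probe point can therefore be given either as a raw address in p->addr
   * or as p->symbol_name plus p->offset, but not both; for example (symbol
   * chosen for illustration only), .symbol_name = "vfs_read", .offset = 0
   * probes the entry of vfs_read().
   */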
  1228. /* Check that the passed kprobe is valid and return the kprobe from kprobe_table. */
  1229. static struct kprobe *__get_valid_kprobe(struct kprobe *p)
  1230. {
  1231. struct kprobe *ap, *list_p;
  1232. ap = get_kprobe(p->addr);
  1233. if (unlikely(!ap))
  1234. return NULL;
  1235. if (p != ap) {
  1236. list_for_each_entry_rcu(list_p, &ap->list, list)
  1237. if (list_p == p)
  1238. /* kprobe p is a valid probe */
  1239. goto valid;
  1240. return NULL;
  1241. }
  1242. valid:
  1243. return ap;
  1244. }
  1245. /* Return error if the kprobe is being re-registered */
  1246. static inline int check_kprobe_rereg(struct kprobe *p)
  1247. {
  1248. int ret = 0;
  1249. mutex_lock(&kprobe_mutex);
  1250. if (__get_valid_kprobe(p))
  1251. ret = -EINVAL;
  1252. mutex_unlock(&kprobe_mutex);
  1253. return ret;
  1254. }
  1255. int __weak arch_check_ftrace_location(struct kprobe *p)
  1256. {
  1257. unsigned long ftrace_addr;
  1258. ftrace_addr = ftrace_location((unsigned long)p->addr);
  1259. if (ftrace_addr) {
  1260. #ifdef CONFIG_KPROBES_ON_FTRACE
  1261. /* Given address is not on the instruction boundary */
  1262. if ((unsigned long)p->addr != ftrace_addr)
  1263. return -EILSEQ;
  1264. p->flags |= KPROBE_FLAG_FTRACE;
  1265. #else /* !CONFIG_KPROBES_ON_FTRACE */
  1266. return -EINVAL;
  1267. #endif
  1268. }
  1269. return 0;
  1270. }
  1271. static int check_kprobe_address_safe(struct kprobe *p,
  1272. struct module **probed_mod)
  1273. {
  1274. int ret;
  1275. ret = arch_check_ftrace_location(p);
  1276. if (ret)
  1277. return ret;
  1278. jump_label_lock();
  1279. preempt_disable();
  1280. /* Ensure it is not in reserved area nor out of text */
  1281. if (!kernel_text_address((unsigned long) p->addr) ||
  1282. within_kprobe_blacklist((unsigned long) p->addr) ||
  1283. jump_label_text_reserved(p->addr, p->addr)) {
  1284. ret = -EINVAL;
  1285. goto out;
  1286. }
  1287. /* Check if we are probing a module */
  1288. *probed_mod = __module_text_address((unsigned long) p->addr);
  1289. if (*probed_mod) {
  1290. /*
  1291. * We must hold a refcount of the probed module while updating
  1292. * its code to prohibit unexpected unloading.
  1293. */
  1294. if (unlikely(!try_module_get(*probed_mod))) {
  1295. ret = -ENOENT;
  1296. goto out;
  1297. }
  1298. /*
  1299. * If the module has already freed .init.text, we cannot insert
  1300. * kprobes there.
  1301. */
  1302. if (within_module_init((unsigned long)p->addr, *probed_mod) &&
  1303. (*probed_mod)->state != MODULE_STATE_COMING) {
  1304. module_put(*probed_mod);
  1305. *probed_mod = NULL;
  1306. ret = -ENOENT;
  1307. }
  1308. }
  1309. out:
  1310. preempt_enable();
  1311. jump_label_unlock();
  1312. return ret;
  1313. }
int register_kprobe(struct kprobe *p)
{
	int ret;
	struct kprobe *old_p;
	struct module *probed_mod;
	kprobe_opcode_t *addr;

	/* Adjust probe address from symbol */
	addr = kprobe_addr(p);
	if (IS_ERR(addr))
		return PTR_ERR(addr);
	p->addr = addr;

	ret = check_kprobe_rereg(p);
	if (ret)
		return ret;

	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
	p->flags &= KPROBE_FLAG_DISABLED;
	p->nmissed = 0;
	INIT_LIST_HEAD(&p->list);

	ret = check_kprobe_address_safe(p, &probed_mod);
	if (ret)
		return ret;

	mutex_lock(&kprobe_mutex);

	old_p = get_kprobe(p->addr);
	if (old_p) {
		/* Since this may unoptimize old_p, locking text_mutex. */
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	cpus_read_lock();
	/* Prevent text modification */
	mutex_lock(&text_mutex);
	ret = prepare_kprobe(p);
	mutex_unlock(&text_mutex);
	cpus_read_unlock();
	if (ret)
		goto out;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
			   &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (!kprobes_all_disarmed && !kprobe_disabled(p)) {
		ret = arm_kprobe(p);
		if (ret) {
			hlist_del_rcu(&p->hlist);
			synchronize_sched();
			goto out;
		}
	}

	/* Try to optimize kprobe */
	try_to_optimize_kprobe(p);
out:
	mutex_unlock(&kprobe_mutex);

	if (probed_mod)
		module_put(probed_mod);

	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobe);
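/*
 * Illustrative usage sketch (not part of this file): a minimal caller
 * registering a pre_handler. The handler name, the probed symbol and the
 * module boilerplate below are assumptions for the example only.
 *
 *	static int my_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("kprobe hit at %pS\n", p->addr);
 *		return 0;	// let the probed instruction run normally
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.symbol_name	= "do_sys_open",
 *		.pre_handler	= my_pre,
 *	};
 *
 *	// in module init:
 *	//	int ret = register_kprobe(&my_kp);
 *	//	if (ret < 0)
 *	//		return ret;
 */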
/* Check if all probes on the aggrprobe are disabled */
static int aggr_kprobe_disabled(struct kprobe *ap)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &ap->list, list)
		if (!kprobe_disabled(kp))
			/*
			 * There is an active probe on the list.
			 * We can't disable this ap.
			 */
			return 0;

	return 1;
}
/* Disable one kprobe: Must be called with kprobe_mutex held */
static struct kprobe *__disable_kprobe(struct kprobe *p)
{
	struct kprobe *orig_p;
	int ret;

	/* Get an original kprobe for return */
	orig_p = __get_valid_kprobe(p);
	if (unlikely(orig_p == NULL))
		return ERR_PTR(-EINVAL);

	if (!kprobe_disabled(p)) {
		/* Disable probe if it is a child probe */
		if (p != orig_p)
			p->flags |= KPROBE_FLAG_DISABLED;

		/* Try to disarm and disable this/parent probe */
		if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
			/*
			 * If kprobes_all_disarmed is set, orig_p
			 * should have already been disarmed, so
			 * skip the unneeded disarming process.
			 */
			if (!kprobes_all_disarmed) {
				ret = disarm_kprobe(orig_p, true);
				if (ret) {
					p->flags &= ~KPROBE_FLAG_DISABLED;
					return ERR_PTR(ret);
				}
			}
			orig_p->flags |= KPROBE_FLAG_DISABLED;
		}
	}

	return orig_p;
}
/*
 * Unregister a kprobe without a scheduler synchronization.
 */
static int __unregister_kprobe_top(struct kprobe *p)
{
	struct kprobe *ap, *list_p;

	/* Disable kprobe. This will disarm it if needed. */
	ap = __disable_kprobe(p);
	if (IS_ERR(ap))
		return PTR_ERR(ap);

	if (ap == p)
		/*
		 * This probe is an independent (and non-optimized) kprobe
		 * (not an aggrprobe). Remove from the hash list.
		 */
		goto disarmed;

	/* Following process expects this probe is an aggrprobe */
	WARN_ON(!kprobe_aggrprobe(ap));

	if (list_is_singular(&ap->list) && kprobe_disarmed(ap))
		/*
		 * !disarmed could happen if the probe is under delayed
		 * unoptimizing.
		 */
		goto disarmed;
	else {
		/* If disabling probe has special handlers, update aggrprobe */
		if (p->post_handler && !kprobe_gone(p)) {
			list_for_each_entry_rcu(list_p, &ap->list, list) {
				if ((list_p != p) && (list_p->post_handler))
					goto noclean;
			}
			ap->post_handler = NULL;
		}
noclean:
		/*
		 * Remove from the aggrprobe: this path will do nothing in
		 * __unregister_kprobe_bottom().
		 */
		list_del_rcu(&p->list);
		if (!kprobe_disabled(ap) && !kprobes_all_disarmed)
			/*
			 * Try to optimize this probe again, because post
			 * handler may have been changed.
			 */
			optimize_kprobe(ap);
	}
	return 0;

disarmed:
	BUG_ON(!kprobe_disarmed(ap));
	hlist_del_rcu(&ap->hlist);
	return 0;
}
static void __unregister_kprobe_bottom(struct kprobe *p)
{
	struct kprobe *ap;

	if (list_empty(&p->list))
		/* This is an independent kprobe */
		arch_remove_kprobe(p);
	else if (list_is_singular(&p->list)) {
		/* This is the last child of an aggrprobe */
		ap = list_entry(p->list.next, struct kprobe, list);
		list_del(&p->list);
		free_aggr_kprobe(ap);
	}
	/* Otherwise, do nothing. */
}
  1481. int register_kprobes(struct kprobe **kps, int num)
  1482. {
  1483. int i, ret = 0;
  1484. if (num <= 0)
  1485. return -EINVAL;
  1486. for (i = 0; i < num; i++) {
  1487. ret = register_kprobe(kps[i]);
  1488. if (ret < 0) {
  1489. if (i > 0)
  1490. unregister_kprobes(kps, i);
  1491. break;
  1492. }
  1493. }
  1494. return ret;
  1495. }
  1496. EXPORT_SYMBOL_GPL(register_kprobes);
  1497. void unregister_kprobe(struct kprobe *p)
  1498. {
  1499. unregister_kprobes(&p, 1);
  1500. }
  1501. EXPORT_SYMBOL_GPL(unregister_kprobe);
  1502. void unregister_kprobes(struct kprobe **kps, int num)
  1503. {
  1504. int i;
  1505. if (num <= 0)
  1506. return;
  1507. mutex_lock(&kprobe_mutex);
  1508. for (i = 0; i < num; i++)
  1509. if (__unregister_kprobe_top(kps[i]) < 0)
  1510. kps[i]->addr = NULL;
  1511. mutex_unlock(&kprobe_mutex);
  1512. synchronize_sched();
  1513. for (i = 0; i < num; i++)
  1514. if (kps[i]->addr)
  1515. __unregister_kprobe_bottom(kps[i]);
  1516. }
  1517. EXPORT_SYMBOL_GPL(unregister_kprobes);
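/*
 * Illustrative teardown sketch (not part of this file): unregistration is
 * two-phase (top: disarm and unlink under kprobe_mutex; bottom: free after
 * synchronize_sched()), so handlers still running on other CPUs cannot see
 * freed memory. The probe array below is an assumption for the example only.
 *
 *	static struct kprobe *my_probes[] = { &kp_open, &kp_close };
 *
 *	// in module exit:
 *	//	unregister_kprobes(my_probes, ARRAY_SIZE(my_probes));
 */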
int __weak kprobe_exceptions_notify(struct notifier_block *self,
				    unsigned long val, void *data)
{
	return NOTIFY_DONE;
}
NOKPROBE_SYMBOL(kprobe_exceptions_notify);

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

unsigned long __weak arch_deref_entry_point(void *entry)
{
	return (unsigned long)entry;
}
#ifdef CONFIG_KRETPROBES
/*
 * This kprobe pre_handler is registered with every kretprobe. When probe
 * hits it will set up the return probe.
 */
static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long hash, flags = 0;
	struct kretprobe_instance *ri;

	/*
	 * To avoid deadlocks, prohibit return probing in NMI contexts,
	 * just skip the probe and increase the (inexact) 'nmissed'
	 * statistical counter, so that the user is informed that
	 * something happened:
	 */
	if (unlikely(in_nmi())) {
		rp->nmissed++;
		return 0;
	}

	/* TODO: consider swapping the RA only after the last pre_handler fired */
	hash = hash_ptr(current, KPROBE_HASH_BITS);
	raw_spin_lock_irqsave(&rp->lock, flags);
	if (!hlist_empty(&rp->free_instances)) {
		ri = hlist_entry(rp->free_instances.first,
				 struct kretprobe_instance, hlist);
		hlist_del(&ri->hlist);
		raw_spin_unlock_irqrestore(&rp->lock, flags);

		ri->rp = rp;
		ri->task = current;

		if (rp->entry_handler && rp->entry_handler(ri, regs)) {
			raw_spin_lock_irqsave(&rp->lock, flags);
			hlist_add_head(&ri->hlist, &rp->free_instances);
			raw_spin_unlock_irqrestore(&rp->lock, flags);
			return 0;
		}

		arch_prepare_kretprobe(ri, regs);

		/* XXX(hch): why is there no hlist_move_head? */
		INIT_HLIST_NODE(&ri->hlist);
		kretprobe_table_lock(hash, &flags);
		hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
		kretprobe_table_unlock(hash, &flags);
	} else {
		rp->nmissed++;
		raw_spin_unlock_irqrestore(&rp->lock, flags);
	}
	return 0;
}
NOKPROBE_SYMBOL(pre_handler_kretprobe);
bool __weak arch_kprobe_on_func_entry(unsigned long offset)
{
	return !offset;
}

bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset)
{
	kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset);

	if (IS_ERR(kp_addr))
		return false;

	if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset) ||
	    !arch_kprobe_on_func_entry(offset))
		return false;

	return true;
}
int register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;
	void *addr;

	if (!kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset))
		return -EINVAL;

	if (kretprobe_blacklist_size) {
		addr = kprobe_addr(&rp->kp);
		if (IS_ERR(addr))
			return PTR_ERR(addr);

		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			if (kretprobe_blacklist[i].addr == addr)
				return -EINVAL;
		}
	}

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
#else
		rp->maxactive = num_possible_cpus();
#endif
	}
	raw_spin_lock_init(&rp->lock);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance) +
			       rp->data_size, GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->hlist);
		hlist_add_head(&inst->hlist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	ret = register_kprobe(&rp->kp);
	if (ret != 0)
		free_rp_inst(rp);
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobe);
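/*
 * Illustrative usage sketch (not part of this file): a minimal kretprobe
 * that reports a function's return value. The handler name, probed symbol
 * and maxactive value below are assumptions for the example only.
 *
 *	static int my_ret_handler(struct kretprobe_instance *ri,
 *				  struct pt_regs *regs)
 *	{
 *		pr_info("%s returned %lu\n", ri->rp->kp.symbol_name,
 *			regs_return_value(regs));
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_rp = {
 *		.kp.symbol_name	= "do_fork",
 *		.handler	= my_ret_handler,
 *		.maxactive	= 20,	// cap on concurrent return instances
 *	};
 *
 *	// in module init:
 *	//	int ret = register_kretprobe(&my_rp);
 */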
int register_kretprobes(struct kretprobe **rps, int num)
{
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kretprobe(rps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kretprobes(rps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void unregister_kretprobe(struct kretprobe *rp)
{
	unregister_kretprobes(&rp, 1);
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void unregister_kretprobes(struct kretprobe **rps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
			rps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (rps[i]->kp.addr) {
			__unregister_kprobe_bottom(&rps[i]->kp);
			cleanup_rp_inst(rps[i]);
		}
	}
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);
#else /* CONFIG_KRETPROBES */
int register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobe);

int register_kretprobes(struct kretprobe **rps, int num)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void unregister_kretprobe(struct kretprobe *rp)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void unregister_kretprobes(struct kretprobe **rps, int num)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
{
	return 0;
}
NOKPROBE_SYMBOL(pre_handler_kretprobe);

#endif /* CONFIG_KRETPROBES */
/* Set the kprobe gone and remove its instruction buffer. */
static void kill_kprobe(struct kprobe *p)
{
	struct kprobe *kp;

	p->flags |= KPROBE_FLAG_GONE;
	if (kprobe_aggrprobe(p)) {
		/*
		 * If this is an aggr_kprobe, we have to list all the
		 * chained probes and mark them GONE.
		 */
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->flags |= KPROBE_FLAG_GONE;
		p->post_handler = NULL;
		kill_optimized_kprobe(p);
	}
	/*
	 * Here, we can remove insn_slot safely, because no thread calls
	 * the original probed function (which will be freed soon) any more.
	 */
	arch_remove_kprobe(p);
}
/* Disable one kprobe */
int disable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Disable this kprobe */
	p = __disable_kprobe(kp);
	if (IS_ERR(p))
		ret = PTR_ERR(p);

	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(disable_kprobe);

/* Enable one kprobe */
int enable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Check whether specified probe is valid. */
	p = __get_valid_kprobe(kp);
	if (unlikely(p == NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (kprobe_gone(kp)) {
		/* This kprobe has gone, we can't re-enable it. */
		ret = -EINVAL;
		goto out;
	}

	if (p != kp)
		kp->flags &= ~KPROBE_FLAG_DISABLED;

	if (!kprobes_all_disarmed && kprobe_disabled(p)) {
		p->flags &= ~KPROBE_FLAG_DISABLED;
		ret = arm_kprobe(p);
		if (ret)
			p->flags |= KPROBE_FLAG_DISABLED;
	}
out:
	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(enable_kprobe);
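/*
 * Illustrative usage sketch (not part of this file): temporarily muting a
 * registered probe instead of tearing it down. 'my_kp' is an assumed probe
 * carried over from the register_kprobe() example above.
 *
 *	// stop firing, but keep the probe registered:
 *	//	disable_kprobe(&my_kp);
 *	// ...later, resume:
 *	//	enable_kprobe(&my_kp);
 */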
/* Caller must NOT call this in usual path. This is only for critical case */
void dump_kprobe(struct kprobe *kp)
{
	pr_err("Dumping kprobe:\n");
	pr_err("Name: %s\nOffset: %x\nAddress: %pS\n",
	       kp->symbol_name, kp->offset, kp->addr);
}
NOKPROBE_SYMBOL(dump_kprobe);

/*
 * Lookup and populate the kprobe_blacklist.
 *
 * Unlike the kretprobe blacklist, we'll need to determine
 * the range of addresses that belong to the said functions,
 * since a kprobe need not necessarily be at the beginning
 * of a function.
 */
static int __init populate_kprobe_blacklist(unsigned long *start,
					    unsigned long *end)
{
	unsigned long *iter;
	struct kprobe_blacklist_entry *ent;
	unsigned long entry, offset = 0, size = 0;

	for (iter = start; iter < end; iter++) {
		entry = arch_deref_entry_point((void *)*iter);

		if (!kernel_text_address(entry) ||
		    !kallsyms_lookup_size_offset(entry, &size, &offset))
			continue;

		ent = kmalloc(sizeof(*ent), GFP_KERNEL);
		if (!ent)
			return -ENOMEM;
		ent->start_addr = entry;
		ent->end_addr = entry + size;
		INIT_LIST_HEAD(&ent->list);
		list_add_tail(&ent->list, &kprobe_blacklist);
	}
	return 0;
}
/* Module notifier call back, checking kprobes on the module */
static int kprobes_module_callback(struct notifier_block *nb,
				   unsigned long val, void *data)
{
	struct module *mod = data;
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;
	int checkcore = (val == MODULE_STATE_GOING);

	if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
		return NOTIFY_DONE;

	/*
	 * When MODULE_STATE_GOING is notified, both the module's .text and
	 * .init.text sections are about to be freed. When MODULE_STATE_LIVE
	 * is notified, only the .init.text section is freed. We need to
	 * disable the kprobes which have been inserted into those sections.
	 */
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, head, hlist)
			if (within_module_init((unsigned long)p->addr, mod) ||
			    (checkcore &&
			     within_module_core((unsigned long)p->addr, mod))) {
				/*
				 * The vaddr this probe is installed at will
				 * soon be vfreed but not synced to disk.
				 * Hence, disarming the breakpoint isn't
				 * needed.
				 *
				 * Note, this will also move any optimized
				 * probes that are pending to be removed from
				 * their corresponding lists to the
				 * freeing_list and will not be touched by the
				 * delayed kprobe_optimizer work handler.
				 */
				kill_kprobe(p);
			}
	}
	mutex_unlock(&kprobe_mutex);
	return NOTIFY_DONE;
}

static struct notifier_block kprobe_module_nb = {
	.notifier_call = kprobes_module_callback,
	.priority = 0
};
/* Markers of _kprobe_blacklist section */
extern unsigned long __start_kprobe_blacklist[];
extern unsigned long __stop_kprobe_blacklist[];

static int __init init_kprobes(void)
{
	int i, err = 0;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
		raw_spin_lock_init(&(kretprobe_table_locks[i].lock));
	}

	err = populate_kprobe_blacklist(__start_kprobe_blacklist,
					__stop_kprobe_blacklist);
	if (err) {
		pr_err("kprobes: failed to populate blacklist: %d\n", err);
		pr_err("Please take care of using kprobes.\n");
	}

	if (kretprobe_blacklist_size) {
		/* lookup the function address from its name */
		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			kretprobe_blacklist[i].addr =
				kprobe_lookup_name(kretprobe_blacklist[i].name, 0);
			if (!kretprobe_blacklist[i].addr)
				printk("kretprobe: lookup failed: %s\n",
				       kretprobe_blacklist[i].name);
		}
	}

#if defined(CONFIG_OPTPROBES)
#if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
	/* Init kprobe_optinsn_slots */
	kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
#endif
	/* By default, kprobes can be optimized */
	kprobes_allow_optimization = true;
#endif

	/* By default, kprobes are armed */
	kprobes_all_disarmed = false;

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);
	if (!err)
		err = register_module_notifier(&kprobe_module_nb);

	kprobes_initialized = (err == 0);

	if (!err)
		init_test_probes();
	return err;
}
#ifdef CONFIG_DEBUG_FS
static void report_probe(struct seq_file *pi, struct kprobe *p,
		const char *sym, int offset, char *modname, struct kprobe *pp)
{
	char *kprobe_type;
	void *addr = p->addr;

	if (p->pre_handler == pre_handler_kretprobe)
		kprobe_type = "r";
	else
		kprobe_type = "k";

	if (!kallsyms_show_value())
		addr = NULL;

	if (sym)
		seq_printf(pi, "%px %s %s+0x%x %s ",
			   addr, kprobe_type, sym, offset,
			   (modname ? modname : " "));
	else	/* try to use %pS */
		seq_printf(pi, "%px %s %pS ",
			   addr, kprobe_type, p->addr);

	if (!pp)
		pp = p;
	seq_printf(pi, "%s%s%s%s\n",
		   (kprobe_gone(p) ? "[GONE]" : ""),
		   ((kprobe_disabled(p) && !kprobe_gone(p)) ? "[DISABLED]" : ""),
		   (kprobe_optimized(pp) ? "[OPTIMIZED]" : ""),
		   (kprobe_ftrace(pp) ? "[FTRACE]" : ""));
}
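/*
 * Each entry in the debugfs 'kprobes/list' file follows the format built
 * above, roughly (addresses are shown only when kallsyms_show_value()
 * allows it; the address and symbol values below are illustrative, not
 * taken from a real system):
 *
 *	ffffffff81234560 k do_sys_open+0x0   [DISABLED]
 *	ffffffff81234560 r do_fork+0x0   [OPTIMIZED][FTRACE]
 */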
static void *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= KPROBE_TABLE_SIZE)
		return NULL;
	return pos;
}

static void kprobe_seq_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}

static int show_kprobe_addr(struct seq_file *pi, void *v)
{
	struct hlist_head *head;
	struct kprobe *p, *kp;
	const char *sym = NULL;
	unsigned int i = *(loff_t *) v;
	unsigned long offset = 0;
	char *modname, namebuf[KSYM_NAME_LEN];

	head = &kprobe_table[i];
	preempt_disable();
	hlist_for_each_entry_rcu(p, head, hlist) {
		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
				      &offset, &modname, namebuf);
		if (kprobe_aggrprobe(p)) {
			list_for_each_entry_rcu(kp, &p->list, list)
				report_probe(pi, kp, sym, offset, modname, p);
		} else
			report_probe(pi, p, sym, offset, modname, NULL);
	}
	preempt_enable();
	return 0;
}

static const struct seq_operations kprobes_seq_ops = {
	.start = kprobe_seq_start,
	.next  = kprobe_seq_next,
	.stop  = kprobe_seq_stop,
	.show  = show_kprobe_addr
};

static int kprobes_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobes_seq_ops);
}

static const struct file_operations debugfs_kprobes_operations = {
	.open    = kprobes_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};
/* kprobes/blacklist -- shows which functions cannot be probed */
static void *kprobe_blacklist_seq_start(struct seq_file *m, loff_t *pos)
{
	return seq_list_start(&kprobe_blacklist, *pos);
}

static void *kprobe_blacklist_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &kprobe_blacklist, pos);
}

static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
{
	struct kprobe_blacklist_entry *ent =
		list_entry(v, struct kprobe_blacklist_entry, list);

	/*
	 * If /proc/kallsyms is not showing kernel addresses, we won't
	 * show them here either.
	 */
	if (!kallsyms_show_value())
		seq_printf(m, "0x%px-0x%px\t%ps\n", NULL, NULL,
			   (void *)ent->start_addr);
	else
		seq_printf(m, "0x%px-0x%px\t%ps\n", (void *)ent->start_addr,
			   (void *)ent->end_addr, (void *)ent->start_addr);
	return 0;
}

static const struct seq_operations kprobe_blacklist_seq_ops = {
	.start = kprobe_blacklist_seq_start,
	.next  = kprobe_blacklist_seq_next,
	.stop  = kprobe_seq_stop,	/* Reuse void function */
	.show  = kprobe_blacklist_seq_show,
};

static int kprobe_blacklist_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobe_blacklist_seq_ops);
}

static const struct file_operations debugfs_kprobe_blacklist_ops = {
	.open    = kprobe_blacklist_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};
static int arm_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i, total = 0, errors = 0;
	int err, ret = 0;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are armed, just return */
	if (!kprobes_all_disarmed)
		goto already_enabled;

	/*
	 * optimize_kprobe() called by arm_kprobe() checks
	 * kprobes_all_disarmed, so set kprobes_all_disarmed before
	 * arm_kprobe.
	 */
	kprobes_all_disarmed = false;
	/* Arming kprobes doesn't optimize kprobe itself */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		/* Arm all kprobes on a best-effort basis */
		hlist_for_each_entry_rcu(p, head, hlist) {
			if (!kprobe_disabled(p)) {
				err = arm_kprobe(p);
				if (err) {
					errors++;
					ret = err;
				}
				total++;
			}
		}
	}

	if (errors)
		pr_warn("Kprobes globally enabled, but failed to arm %d out of %d probes\n",
			errors, total);
	else
		pr_info("Kprobes globally enabled\n");

already_enabled:
	mutex_unlock(&kprobe_mutex);
	return ret;
}
static int disarm_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i, total = 0, errors = 0;
	int err, ret = 0;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already disarmed, just return */
	if (kprobes_all_disarmed) {
		mutex_unlock(&kprobe_mutex);
		return 0;
	}

	kprobes_all_disarmed = true;

	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		/* Disarm all kprobes on a best-effort basis */
		hlist_for_each_entry_rcu(p, head, hlist) {
			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) {
				err = disarm_kprobe(p, false);
				if (err) {
					errors++;
					ret = err;
				}
				total++;
			}
		}
	}

	if (errors)
		pr_warn("Kprobes globally disabled, but failed to disarm %d out of %d probes\n",
			errors, total);
	else
		pr_info("Kprobes globally disabled\n");

	mutex_unlock(&kprobe_mutex);

	/* Wait for disarming all kprobes by optimizer */
	wait_for_kprobe_optimizer();

	return ret;
}
/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility when
 * available
 */
static ssize_t read_enabled_file_bool(struct file *file,
	       char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[3];

	if (!kprobes_all_disarmed)
		buf[0] = '1';
	else
		buf[0] = '0';
	buf[1] = '\n';
	buf[2] = 0x00;
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t write_enabled_file_bool(struct file *file,
	       const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	size_t buf_size;
	int ret = 0;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	switch (buf[0]) {
	case 'y':
	case 'Y':
	case '1':
		ret = arm_all_kprobes();
		break;
	case 'n':
	case 'N':
	case '0':
		ret = disarm_all_kprobes();
		break;
	default:
		return -EINVAL;
	}

	if (ret)
		return ret;

	return count;
}

static const struct file_operations fops_kp = {
	.read   = read_enabled_file_bool,
	.write  = write_enabled_file_bool,
	.llseek = default_llseek,
};
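/*
 * Userspace view of the file backed by fops_kp (the path assumes the
 * default debugfs mount point; values shown are illustrative):
 *
 *	# cat /sys/kernel/debug/kprobes/enabled
 *	1
 *	# echo 0 > /sys/kernel/debug/kprobes/enabled	(disarm all kprobes)
 *	# echo 1 > /sys/kernel/debug/kprobes/enabled	(re-arm them)
 */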
static int __init debugfs_kprobe_init(void)
{
	struct dentry *dir, *file;
	unsigned int value = 1;

	dir = debugfs_create_dir("kprobes", NULL);
	if (!dir)
		return -ENOMEM;

	file = debugfs_create_file("list", 0400, dir, NULL,
				   &debugfs_kprobes_operations);
	if (!file)
		goto error;

	file = debugfs_create_file("enabled", 0600, dir,
				   &value, &fops_kp);
	if (!file)
		goto error;

	file = debugfs_create_file("blacklist", 0400, dir, NULL,
				   &debugfs_kprobe_blacklist_ops);
	if (!file)
		goto error;

	return 0;

error:
	debugfs_remove(dir);
	return -ENOMEM;
}

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */

module_init(init_kprobes);