slab_common.c

/*
 * Slab allocator functions that are independent of the allocator strategy
 *
 * (C) 2012 Christoph Lameter <cl@linux.com>
 */
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <linux/memcontrol.h>
#include <trace/events/kmem.h>

#include "slab.h"

enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;

#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name,
				   size_t size)
{
	struct kmem_cache *s = NULL;

	if (!name || in_interrupt() || size < sizeof(void *) ||
	    size > KMALLOC_MAX_SIZE) {
		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
		return -EINVAL;
	}

	list_for_each_entry(s, &slab_caches, list) {
		char tmp;
		int res;

		/*
		 * This happens when the module gets unloaded and doesn't
		 * destroy its slab cache and no-one else reuses the vmalloc
		 * area of the module. Print a warning.
		 */
		res = probe_kernel_address(s->name, tmp);
		if (res) {
			pr_err("Slab cache with size %d has lost its name\n",
			       s->object_size);
			continue;
		}

#if !defined(CONFIG_SLUB) || !defined(CONFIG_SLUB_DEBUG_ON)
		/*
		 * For simplicity, we won't check this in the list of memcg
		 * caches. We have control over memcg naming, and if there
		 * aren't duplicates in the global list, there won't be any
		 * duplicates in the memcg lists as well.
		 */
		if (!memcg && !strcmp(s->name, name)) {
			pr_err("%s (%s): Cache name already exists.\n",
			       __func__, name);
			dump_stack();
			s = NULL;
			return -EINVAL;
		}
#endif
	}

	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
	return 0;
}
#else
static inline int kmem_cache_sanity_check(struct mem_cgroup *memcg,
					  const char *name, size_t size)
{
	return 0;
}
#endif

#ifdef CONFIG_MEMCG_KMEM
int memcg_update_all_caches(int num_memcgs)
{
	struct kmem_cache *s;
	int ret = 0;

	mutex_lock(&slab_mutex);

	list_for_each_entry(s, &slab_caches, list) {
		if (!is_root_cache(s))
			continue;

		ret = memcg_update_cache_size(s, num_memcgs);
		/*
		 * See comment in memcontrol.c, memcg_update_cache_size:
		 * Instead of freeing the memory, we'll just leave the caches
		 * up to this point in an updated state.
		 */
		if (ret)
			goto out;
	}

	memcg_update_array_size(num_memcgs);
out:
	mutex_unlock(&slab_mutex);
	return ret;
}
#endif

/*
 * Figure out what the alignment of the objects will be given a set of
 * flags, a user specified alignment and the size of the objects.
 */
unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size)
{
	/*
	 * If the user wants hardware cache aligned objects then follow that
	 * suggestion if the object is sufficiently large.
	 *
	 * The hardware cache alignment cannot override the specified
	 * alignment though. If that is greater, then use it.
	 */
	if (flags & SLAB_HWCACHE_ALIGN) {
		unsigned long ralign = cache_line_size();

		while (size <= ralign / 2)
			ralign /= 2;
		align = max(align, ralign);
	}

	if (align < ARCH_SLAB_MINALIGN)
		align = ARCH_SLAB_MINALIGN;

	return ALIGN(align, sizeof(void *));
}
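
/*
 * A worked example of the calculation above, assuming a 64-byte cache line
 * (typical on x86): for a 24-byte object created with SLAB_HWCACHE_ALIGN and
 * align == 0, ralign starts at 64 and is halved while size <= ralign / 2,
 * stopping at 32; the result is max(0, 32) == 32, rounded up to a multiple
 * of sizeof(void *), so the small object is padded to 32 bytes instead of
 * wasting a whole cache line.
 */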
/*
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline. This can be beneficial if you're counting cycles as closely
 * as davem.
 */
struct kmem_cache *
kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
			size_t align, unsigned long flags, void (*ctor)(void *),
			struct kmem_cache *parent_cache)
{
	struct kmem_cache *s = NULL;
	int err;

	get_online_cpus();
	mutex_lock(&slab_mutex);

	err = kmem_cache_sanity_check(memcg, name, size);
	if (err)
		goto out_unlock;

	if (memcg) {
		/*
		 * Since per-memcg caches are created asynchronously on first
		 * allocation (see memcg_kmem_get_cache()), several threads can
		 * try to create the same cache, but only one of them may
		 * succeed. Therefore if we get here and see the cache has
		 * already been created, we silently return NULL.
		 */
		if (cache_from_memcg_idx(parent_cache, memcg_cache_id(memcg)))
			goto out_unlock;
	}

	/*
	 * Some allocators will constrain the set of valid flags to a subset
	 * of all flags. We expect them to define CACHE_CREATE_MASK in this
	 * case, and we'll just provide them with a sanitized version of the
	 * passed flags.
	 */
	flags &= CACHE_CREATE_MASK;

	s = __kmem_cache_alias(memcg, name, size, align, flags, ctor);
	if (s)
		goto out_unlock;

	err = -ENOMEM;
	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
	if (!s)
		goto out_unlock;

	s->object_size = s->size = size;
	s->align = calculate_alignment(flags, align, size);
	s->ctor = ctor;

	s->name = kstrdup(name, GFP_KERNEL);
	if (!s->name)
		goto out_free_cache;

	err = memcg_alloc_cache_params(memcg, s, parent_cache);
	if (err)
		goto out_free_cache;

	err = __kmem_cache_create(s, flags);
	if (err)
		goto out_free_cache;

	s->refcount = 1;
	list_add(&s->list, &slab_caches);
	memcg_register_cache(s);

out_unlock:
	mutex_unlock(&slab_mutex);
	put_online_cpus();

	/*
	 * There is no point in flooding logs with warnings or especially
	 * crashing the system if we fail to create a cache for a memcg. In
	 * this case we will be accounting the memcg allocation to the root
	 * cgroup until we succeed to create its own cache, but it isn't that
	 * critical.
	 */
	if (err && !memcg) {
		if (flags & SLAB_PANIC)
			panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
				name, err);
		else {
			printk(KERN_WARNING "kmem_cache_create(%s) failed with error %d",
				name, err);
			dump_stack();
		}
		return NULL;
	}
	return s;

out_free_cache:
	memcg_free_cache_params(s);
	kfree(s->name);
	kmem_cache_free(kmem_cache, s);
	goto out_unlock;
}

struct kmem_cache *
kmem_cache_create(const char *name, size_t size, size_t align,
		  unsigned long flags, void (*ctor)(void *))
{
	return kmem_cache_create_memcg(NULL, name, size, align, flags, ctor, NULL);
}
EXPORT_SYMBOL(kmem_cache_create);
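
/*
 * A minimal usage sketch for the interface above. The struct, the cache
 * pointer and the cache name ("foo") are hypothetical and only illustrate
 * the call sequence; kmem_cache_alloc()/kmem_cache_free() are the usual
 * per-cache allocation helpers declared in <linux/slab.h>.
 *
 *	struct foo {
 *		int bar;
 *	};
 *	static struct kmem_cache *foo_cachep;
 *
 *	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				       SLAB_HWCACHE_ALIGN, NULL);
 *	if (!foo_cachep)
 *		return -ENOMEM;
 *
 *	struct foo *f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *	if (f) {
 *		...
 *		kmem_cache_free(foo_cachep, f);
 *	}
 *	kmem_cache_destroy(foo_cachep);
 */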
void kmem_cache_destroy(struct kmem_cache *s)
{
	/* Destroy all the children caches if we aren't a memcg cache */
	kmem_cache_destroy_memcg_children(s);

	get_online_cpus();
	mutex_lock(&slab_mutex);
	s->refcount--;
	if (!s->refcount) {
		list_del(&s->list);

		if (!__kmem_cache_shutdown(s)) {
			memcg_unregister_cache(s);
			mutex_unlock(&slab_mutex);
			if (s->flags & SLAB_DESTROY_BY_RCU)
				rcu_barrier();

			memcg_free_cache_params(s);
			kfree(s->name);
			kmem_cache_free(kmem_cache, s);
		} else {
			list_add(&s->list, &slab_caches);
			mutex_unlock(&slab_mutex);
			printk(KERN_ERR "kmem_cache_destroy %s: Slab cache still has objects\n",
				s->name);
			dump_stack();
		}
	} else {
		mutex_unlock(&slab_mutex);
	}
	put_online_cpus();
}
EXPORT_SYMBOL(kmem_cache_destroy);

int slab_is_available(void)
{
	return slab_state >= UP;
}

#ifndef CONFIG_SLOB
/* Create a cache during boot when no slab services are available yet */
void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t size,
		unsigned long flags)
{
	int err;

	s->name = name;
	s->size = s->object_size = size;
	s->align = calculate_alignment(flags, ARCH_KMALLOC_MINALIGN, size);
	err = __kmem_cache_create(s, flags);

	if (err)
		panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
					name, size, err);

	s->refcount = -1;	/* Exempt from merging for now */
}

struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
				unsigned long flags)
{
	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);

	if (!s)
		panic("Out of memory when creating slab %s\n", name);

	create_boot_cache(s, name, size, flags);
	list_add(&s->list, &slab_caches);
	s->refcount = 1;
	return s;
}

struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
EXPORT_SYMBOL(kmalloc_caches);

#ifdef CONFIG_ZONE_DMA
struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
EXPORT_SYMBOL(kmalloc_dma_caches);
#endif

/*
 * Conversion table for small slab sizes / 8 to the index in the
 * kmalloc array. This is necessary for slabs < 192 since we have non power
 * of two cache sizes there. The size of larger slabs can be determined using
 * fls.
 */
static s8 size_index[24] = {
	3,	/* 8 */
	4,	/* 16 */
	5,	/* 24 */
	5,	/* 32 */
	6,	/* 40 */
	6,	/* 48 */
	6,	/* 56 */
	6,	/* 64 */
	1,	/* 72 */
	1,	/* 80 */
	1,	/* 88 */
	1,	/* 96 */
	7,	/* 104 */
	7,	/* 112 */
	7,	/* 120 */
	7,	/* 128 */
	2,	/* 136 */
	2,	/* 144 */
	2,	/* 152 */
	2,	/* 160 */
	2,	/* 168 */
	2,	/* 176 */
	2,	/* 184 */
	2	/* 192 */
};

static inline int size_index_elem(size_t bytes)
{
	return (bytes - 1) / 8;
}

/*
 * Find the kmem_cache structure that serves a given size of
 * allocation
 */
struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
{
	int index;

	if (unlikely(size > KMALLOC_MAX_SIZE)) {
		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
		return NULL;
	}

	if (size <= 192) {
		if (!size)
			return ZERO_SIZE_PTR;

		index = size_index[size_index_elem(size)];
	} else
		index = fls(size - 1);

#ifdef CONFIG_ZONE_DMA
	if (unlikely((flags & GFP_DMA)))
		return kmalloc_dma_caches[index];
#endif
	return kmalloc_caches[index];
}
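
/*
 * Two lookup examples, assuming the default size_index table above:
 * a 100-byte request gives size_index_elem(100) == 12 and size_index[12] == 7,
 * i.e. the 128-byte cache; a 300-byte request is above 192, so the index is
 * fls(299) == 9, i.e. the 512-byte cache.
 */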
/*
 * Create the kmalloc array. Some of the regular kmalloc arrays
 * may already have been created because they were needed to
 * enable allocations for slab creation.
 */
void __init create_kmalloc_caches(unsigned long flags)
{
	int i;

	/*
	 * Patch up the size_index table if we have strange large alignment
	 * requirements for the kmalloc array. This is only the case for
	 * MIPS it seems. The standard arches will not generate any code here.
	 *
	 * Largest permitted alignment is 256 bytes due to the way we
	 * handle the index determination for the smaller caches.
	 *
	 * Make sure that nothing crazy happens if someone starts tinkering
	 * around with ARCH_KMALLOC_MINALIGN
	 */
	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));

	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
		int elem = size_index_elem(i);

		if (elem >= ARRAY_SIZE(size_index))
			break;
		size_index[elem] = KMALLOC_SHIFT_LOW;
	}

	if (KMALLOC_MIN_SIZE >= 64) {
		/*
		 * The 96 byte sized cache is not used if the alignment
		 * is 64 bytes.
		 */
		for (i = 64 + 8; i <= 96; i += 8)
			size_index[size_index_elem(i)] = 7;
	}

	if (KMALLOC_MIN_SIZE >= 128) {
		/*
		 * The 192 byte sized cache is not used if the alignment
		 * is 128 bytes. Redirect kmalloc to use the 256 byte cache
		 * instead.
		 */
		for (i = 128 + 8; i <= 192; i += 8)
			size_index[size_index_elem(i)] = 8;
	}

	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
		if (!kmalloc_caches[i]) {
			kmalloc_caches[i] = create_kmalloc_cache(NULL,
							1 << i, flags);
		}

		/*
		 * Caches that are not of the two-to-the-power-of size.
		 * These have to be created immediately after the
		 * earlier power of two caches
		 */
		if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
			kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);

		if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
			kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
	}

	/* Kmalloc array is now usable */
	slab_state = UP;

	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
		struct kmem_cache *s = kmalloc_caches[i];
		char *n;

		if (s) {
			n = kasprintf(GFP_NOWAIT, "kmalloc-%d", kmalloc_size(i));

			BUG_ON(!n);
			s->name = n;
		}
	}

#ifdef CONFIG_ZONE_DMA
	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
		struct kmem_cache *s = kmalloc_caches[i];

		if (s) {
			int size = kmalloc_size(i);
			char *n = kasprintf(GFP_NOWAIT,
				 "dma-kmalloc-%d", size);

			BUG_ON(!n);
			kmalloc_dma_caches[i] = create_kmalloc_cache(n,
				size, SLAB_CACHE_DMA | flags);
		}
	}
#endif
}
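
/*
 * The resulting array, as a sketch for a common SLUB configuration where
 * KMALLOC_SHIFT_LOW == 3 and KMALLOC_SHIFT_HIGH == 13: kmalloc_caches[3..13]
 * serve the power-of-two sizes 8..8192 and are named "kmalloc-<size>" by the
 * loop above, while kmalloc_caches[1] and kmalloc_caches[2] hold the odd
 * 96- and 192-byte caches.
 */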
#endif /* !CONFIG_SLOB */

#ifdef CONFIG_TRACING
void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	void *ret = kmalloc_order(size, flags, order);
	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_order_trace);
#endif

#ifdef CONFIG_SLABINFO

#ifdef CONFIG_SLAB
#define SLABINFO_RIGHTS (S_IWUSR | S_IRUSR)
#else
#define SLABINFO_RIGHTS S_IRUSR
#endif

void print_slabinfo_header(struct seq_file *m)
{
	/*
	 * Output format version, so at least we can change it
	 * without _too_ many complaints.
	 */
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
#else
	seq_puts(m, "slabinfo - version: 2.1\n");
#endif
	seq_puts(m, "# name <active_objs> <num_objs> <objsize> "
		 "<objperslab> <pagesperslab>");
	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
		 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
#endif
	seq_putc(m, '\n');
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;

	mutex_lock(&slab_mutex);
	if (!n)
		print_slabinfo_header(m);

	return seq_list_start(&slab_caches, *pos);
}

void *slab_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &slab_caches, pos);
}

void slab_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&slab_mutex);
}

static void
memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info)
{
	struct kmem_cache *c;
	struct slabinfo sinfo;
	int i;

	if (!is_root_cache(s))
		return;

	for_each_memcg_cache_index(i) {
		c = cache_from_memcg_idx(s, i);
		if (!c)
			continue;

		memset(&sinfo, 0, sizeof(sinfo));
		get_slabinfo(c, &sinfo);

		info->active_slabs += sinfo.active_slabs;
		info->num_slabs += sinfo.num_slabs;
		info->shared_avail += sinfo.shared_avail;
		info->active_objs += sinfo.active_objs;
		info->num_objs += sinfo.num_objs;
	}
}

int cache_show(struct kmem_cache *s, struct seq_file *m)
{
	struct slabinfo sinfo;

	memset(&sinfo, 0, sizeof(sinfo));
	get_slabinfo(s, &sinfo);

	memcg_accumulate_slabinfo(s, &sinfo);

	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
		   cache_name(s), sinfo.active_objs, sinfo.num_objs, s->size,
		   sinfo.objects_per_slab, (1 << sinfo.cache_order));

	seq_printf(m, " : tunables %4u %4u %4u",
		   sinfo.limit, sinfo.batchcount, sinfo.shared);
	seq_printf(m, " : slabdata %6lu %6lu %6lu",
		   sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
	slabinfo_show_stats(m, s);
	seq_putc(m, '\n');
	return 0;
}
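
/*
 * An illustrative /proc/slabinfo line produced by the seq_printf() calls
 * above (the counts are made up; only the field order is meaningful here):
 *
 *	kmalloc-64         2048   2112     64   64    1 : tunables 0 0 0 : slabdata 33 33 0
 */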
static int s_show(struct seq_file *m, void *p)
{
	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);

	if (!is_root_cache(s))
		return 0;
	return cache_show(s, m);
}

/*
 * slabinfo_op - iterator that generates /proc/slabinfo
 *
 * Output layout:
 * cache-name
 * num-active-objs
 * total-objs
 * object size
 * num-active-slabs
 * total-slabs
 * num-pages-per-slab
 * + further values on SMP and with statistics enabled
 */
static const struct seq_operations slabinfo_op = {
	.start = s_start,
	.next = slab_next,
	.stop = slab_stop,
	.show = s_show,
};

static int slabinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &slabinfo_op);
}

static const struct file_operations proc_slabinfo_operations = {
	.open		= slabinfo_open,
	.read		= seq_read,
	.write		= slabinfo_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init slab_proc_init(void)
{
	proc_create("slabinfo", SLABINFO_RIGHTS, NULL,
		    &proc_slabinfo_operations);
	return 0;
}
module_init(slab_proc_init);
#endif /* CONFIG_SLABINFO */