cma_debug.c

/*
 * CMA DebugFS Interface
 *
 * Copyright (c) 2015 Sasha Levin <sasha.levin@oracle.com>
 */

#include <linux/debugfs.h>
#include <linux/cma.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm_types.h>

#include "cma.h"

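/*
 * One record per allocation made through the "alloc" file, kept so that
 * a later write to the "free" file can hand the pages back via
 * cma_release().
 */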
struct cma_mem {
	struct hlist_node node;
	struct page *p;
	unsigned long n;
};

static struct dentry *cma_debugfs_root;

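/*
 * Generic read-only getter: reports an unsigned long field (base_pfn,
 * count, order_per_bit below) as a decimal u64.
 */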
static int cma_debugfs_get(void *data, u64 *val)
{
	unsigned long *p = data;

	*val = *p;

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(cma_debugfs_fops, cma_debugfs_get, NULL, "%llu\n");

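/*
 * "used": pages currently allocated. Each set bit in the bitmap covers
 * 2^order_per_bit pages, hence the shift when reporting.
 */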
static int cma_used_get(void *data, u64 *val)
{
	struct cma *cma = data;
	unsigned long used;

	mutex_lock(&cma->lock);
	/* the bitmap size fits in an int, so the cast is safe */
	used = bitmap_weight(cma->bitmap, (int)cma_bitmap_maxno(cma));
	mutex_unlock(&cma->lock);
	*val = (u64)used << cma->order_per_bit;

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(cma_used_fops, cma_used_get, NULL, "%llu\n");

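/*
 * "maxchunk": size (in pages) of the largest contiguous free stretch,
 * i.e. the longest run of zero bits in the allocation bitmap.
 */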
static int cma_maxchunk_get(void *data, u64 *val)
{
	struct cma *cma = data;
	unsigned long maxchunk = 0;
	unsigned long start, end = 0;
	unsigned long bitmap_maxno = cma_bitmap_maxno(cma);

	mutex_lock(&cma->lock);
	for (;;) {
		start = find_next_zero_bit(cma->bitmap, bitmap_maxno, end);
		if (start >= bitmap_maxno)
			break;
		end = find_next_bit(cma->bitmap, bitmap_maxno, start);
		maxchunk = max(end - start, maxchunk);
	}
	mutex_unlock(&cma->lock);
	*val = (u64)maxchunk << cma->order_per_bit;

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(cma_maxchunk_fops, cma_maxchunk_get, NULL, "%llu\n");

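/*
 * Allocations made through debugfs are kept on a per-area list so they
 * can be found again when the "free" file is written; mem_head_lock
 * protects the list.
 */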
static void cma_add_to_cma_mem_list(struct cma *cma, struct cma_mem *mem)
{
	spin_lock(&cma->mem_head_lock);
	hlist_add_head(&mem->node, &cma->mem_head);
	spin_unlock(&cma->mem_head_lock);
}

static struct cma_mem *cma_get_entry_from_list(struct cma *cma)
{
	struct cma_mem *mem = NULL;

	spin_lock(&cma->mem_head_lock);
	if (!hlist_empty(&cma->mem_head)) {
		mem = hlist_entry(cma->mem_head.first, struct cma_mem, node);
		hlist_del_init(&mem->node);
	}
	spin_unlock(&cma->mem_head_lock);

	return mem;
}

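/*
 * Release up to @count pages previously allocated through debugfs. An
 * entry may be split only when order_per_bit == 0; otherwise a partial
 * release would not be aligned to a bitmap bit.
 */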
static int cma_free_mem(struct cma *cma, int count)
{
	struct cma_mem *mem = NULL;

	while (count) {
		mem = cma_get_entry_from_list(cma);
		if (mem == NULL)
			return 0;

		if (mem->n <= count) {
			cma_release(cma, mem->p, mem->n);
			count -= mem->n;
			kfree(mem);
		} else if (cma->order_per_bit == 0) {
			cma_release(cma, mem->p, count);
			mem->p += count;
			mem->n -= count;
			count = 0;
			cma_add_to_cma_mem_list(cma, mem);
		} else {
			pr_debug("cma: cannot release partial block when order_per_bit != 0\n");
			cma_add_to_cma_mem_list(cma, mem);
			break;
		}
	}

	return 0;
}

static int cma_free_write(void *data, u64 val)
{
	int pages = val;
	struct cma *cma = data;

	return cma_free_mem(cma, pages);
}

DEFINE_SIMPLE_ATTRIBUTE(cma_free_fops, NULL, cma_free_write, "%llu\n");

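/*
 * Allocate @count pages from the area and record them so a later write
 * to "free" can release them.
 */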
static int cma_alloc_mem(struct cma *cma, int count)
{
	struct cma_mem *mem;
	struct page *p;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	p = cma_alloc(cma, count, 0);
	if (!p) {
		kfree(mem);
		return -ENOMEM;
	}

	mem->p = p;
	mem->n = count;

	cma_add_to_cma_mem_list(cma, mem);

	return 0;
}

static int cma_alloc_write(void *data, u64 val)
{
	int pages = val;
	struct cma *cma = data;

	return cma_alloc_mem(cma, pages);
}

DEFINE_SIMPLE_ATTRIBUTE(cma_alloc_fops, NULL, cma_alloc_write, "%llu\n");

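/*
 * Create the cma-<idx> directory for one area and populate it with the
 * files wired up above.
 */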
static void cma_debugfs_add_one(struct cma *cma, int idx)
{
	struct dentry *tmp;
	char name[16];
	int u32s;

	sprintf(name, "cma-%d", idx);

	tmp = debugfs_create_dir(name, cma_debugfs_root);

	/* all per-area files, including alloc/free, live under cma-<idx> */
	debugfs_create_file("alloc", S_IWUSR, tmp, cma, &cma_alloc_fops);
	debugfs_create_file("free", S_IWUSR, tmp, cma, &cma_free_fops);
	debugfs_create_file("base_pfn", S_IRUGO, tmp,
				&cma->base_pfn, &cma_debugfs_fops);
	debugfs_create_file("count", S_IRUGO, tmp,
				&cma->count, &cma_debugfs_fops);
	debugfs_create_file("order_per_bit", S_IRUGO, tmp,
				&cma->order_per_bit, &cma_debugfs_fops);
	debugfs_create_file("used", S_IRUGO, tmp, cma, &cma_used_fops);
	debugfs_create_file("maxchunk", S_IRUGO, tmp, cma, &cma_maxchunk_fops);

	u32s = DIV_ROUND_UP(cma_bitmap_maxno(cma), BITS_PER_BYTE * sizeof(u32));
	debugfs_create_u32_array("bitmap", S_IRUGO, tmp, (u32 *)cma->bitmap, u32s);
}

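/*
 * Register the "cma" debugfs root and one subdirectory per area that
 * was set up during early boot.
 */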
static int __init cma_debugfs_init(void)
{
	int i;

	cma_debugfs_root = debugfs_create_dir("cma", NULL);
	if (!cma_debugfs_root)
		return -ENOMEM;

	for (i = 0; i < cma_area_count; i++)
		cma_debugfs_add_one(&cma_areas[i], i);

	return 0;
}

late_initcall(cma_debugfs_init);
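
/*
 * Example interaction (a sketch, not part of the original file; assumes
 * debugfs is mounted at /sys/kernel/debug and that at least one CMA
 * area was registered, so it appears as cma-0):
 *
 *   # echo 64 > /sys/kernel/debug/cma/cma-0/alloc   (allocate 64 pages)
 *   # cat /sys/kernel/debug/cma/cma-0/used          (now reports 64)
 *   # cat /sys/kernel/debug/cma/cma-0/maxchunk      (largest free run)
 *   # echo 64 > /sys/kernel/debug/cma/cma-0/free    (release them again)
 */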