/* linux.c */
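/*
 * Userspace stand-ins for kernel allocation interfaces (mempool, slab,
 * kmalloc) so kernel code such as the radix-tree implementation can be
 * exercised as an ordinary pthread program.
 */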

#include <stdlib.h>
#include <string.h>
#include <malloc.h>
#include <pthread.h>
#include <unistd.h>
#include <assert.h>

#include <linux/mempool.h>
#include <linux/poison.h>
#include <linux/slab.h>
#include <linux/radix-tree.h>
#include <urcu/uatomic.h>
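
/*
 * nr_allocated counts live objects (incremented on every allocation,
 * decremented on every free), which lets a test harness check for
 * leaks. preempt_count is never used in this file; it presumably
 * exists so kernel code that references it still links.
 */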
int nr_allocated;
int preempt_count;
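
/*
 * A minimal slab cache: a mutex, the object size, and a free list of
 * recycled objects threaded through the nodes' private_data pointers.
 */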
struct kmem_cache {
	pthread_mutex_t lock;
	int size;
	int nr_objs;
	void *objs;
	void (*ctor)(void *);
};
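
/*
 * Mempool shims. Unlike the kernel's real mempool, no minimum number
 * of elements is preallocated (min_nr is ignored); calls are simply
 * forwarded to the pool's underlying alloc/free callbacks.
 */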
void *mempool_alloc(mempool_t *pool, int gfp_mask)
{
	return pool->alloc(gfp_mask, pool->data);
}

void mempool_free(void *element, mempool_t *pool)
{
	pool->free(element, pool->data);
}

mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
			mempool_free_t *free_fn, void *pool_data)
{
	mempool_t *ret = malloc(sizeof(*ret));

	ret->alloc = alloc_fn;
	ret->free = free_fn;
	ret->data = pool_data;
	return ret;
}
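
/*
 * Allocate from the cache: pop a node off the free list if one is
 * cached, otherwise malloc() a fresh object and run the constructor
 * on it. Treating __GFP_NOWARN as an instant failure looks like a
 * fault-injection hook for exercising callers' error paths.
 */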
void *kmem_cache_alloc(struct kmem_cache *cachep, int flags)
{
	struct radix_tree_node *node;

	if (flags & __GFP_NOWARN)
		return NULL;

	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs) {
		cachep->nr_objs--;
		node = cachep->objs;
		cachep->objs = node->private_data;
		pthread_mutex_unlock(&cachep->lock);
		node->private_data = NULL;
	} else {
		pthread_mutex_unlock(&cachep->lock);
		node = malloc(cachep->size);
		if (cachep->ctor)
			cachep->ctor(node);
	}

	uatomic_inc(&nr_allocated);
	return node;
}
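
/*
 * Return an object to the cache. Up to about ten objects are kept on
 * the free list for reuse; beyond that, the object is filled with
 * POISON_FREE to catch use-after-free and released to the system
 * allocator.
 */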
void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
	assert(objp);
	uatomic_dec(&nr_allocated);
	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs > 10) {
		memset(objp, POISON_FREE, cachep->size);
		free(objp);
	} else {
		struct radix_tree_node *node = objp;

		cachep->nr_objs++;
		node->private_data = cachep->objs;
		cachep->objs = node;
	}
	pthread_mutex_unlock(&cachep->lock);
}
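
/*
 * kmalloc()/kfree() forward to malloc()/free(); the gfp flags are
 * ignored, and the only extra work is keeping nr_allocated accurate.
 */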
void *kmalloc(size_t size, gfp_t gfp)
{
	void *ret = malloc(size);

	uatomic_inc(&nr_allocated);
	return ret;
}

void kfree(void *p)
{
	if (!p)
		return;
	uatomic_dec(&nr_allocated);
	free(p);
}
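
/*
 * The name, offset, and flags arguments are accepted for API
 * compatibility but ignored; only the object size and the optional
 * constructor are recorded.
 */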
struct kmem_cache *
kmem_cache_create(const char *name, size_t size, size_t offset,
		unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *ret = malloc(sizeof(*ret));

	pthread_mutex_init(&ret->lock, NULL);
	ret->size = size;
	ret->nr_objs = 0;
	ret->objs = NULL;
	ret->ctor = ctor;
	return ret;
}