/* mm.h — minimal userspace stub of kernel memory-management interfaces,
 * used to build kernel library code (e.g. radix-tree/scatterlist tests)
 * as an ordinary user program. */
#ifndef _LINUX_MM_H
#define _LINUX_MM_H
#include <assert.h>
#include <string.h>
#include <stdlib.h>
#include <errno.h>
#include <limits.h>
#include <stdio.h>
/* Userspace stand-in for the kernel's DMA address type. */
typedef unsigned long dma_addr_t;
/* Branch-prediction hint is a no-op here: unlikely(x) expands to (x). */
#define unlikely
#define BUG_ON(x) assert(!(x))
/* Statement expression evaluating to !!(condition); unlike the kernel
 * version, no warning is printed. */
#define WARN_ON(condition) ({ \
int __ret_warn_on = !!(condition); \
unlikely(__ret_warn_on); \
})
/* NOTE(review): despite the name this aborts via assert() on EVERY hit
 * (when NDEBUG is unset), not just the first — fine for tests. */
#define WARN_ON_ONCE(condition) ({ \
int __ret_warn_on = !!(condition); \
if (unlikely(__ret_warn_on)) \
assert(0); \
unlikely(__ret_warn_on); \
})
/* Fixed 4 KiB page geometry for the test build. */
#define PAGE_SIZE (4096)
#define PAGE_SHIFT (12)
#define PAGE_MASK (~(PAGE_SIZE-1))
/* Round x up to the next multiple of a (a must be a power of two). */
#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
/* In this stub a "struct page" is simply the page's virtual address. */
#define virt_to_page(x) ((void *)x)
#define page_address(x) ((void *)x)
/* Physical addresses do not exist in userspace; any caller reaching
 * this stub is a bug, so abort. */
static inline unsigned long page_to_phys(struct page *page)
{
	assert(0);
	return 0;
}
/* With "pages" being plain virtual addresses (see virt_to_page above),
 * a pfn is just the address divided by PAGE_SIZE. */
#define page_to_pfn(page) ((unsigned long)(page) / PAGE_SIZE)
#define pfn_to_page(pfn) (void *)((pfn) * PAGE_SIZE)
#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
/* Single-evaluation min() core: x and y are evaluated exactly once into
 * locals min1/min2.  The (&min1 == &min2) comparison is a compile-time
 * type check — comparing pointers to different types draws a warning. */
#define __min(t1, t2, min1, min2, x, y) ({ \
t1 min1 = (x); \
t2 min2 = (y); \
(void) (&min1 == &min2); \
min1 < min2 ? min1 : min2; })
/* Token pasting with one level of expansion, plus __COUNTER__-based
 * unique identifiers so nested min()/min_t() uses don't shadow. */
#define ___PASTE(a,b) a##b
#define __PASTE(a,b) ___PASTE(a,b)
#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
#define min(x, y) \
__min(typeof(x), typeof(y), \
__UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \
x, y)
/* min_t(): both operands are cast to the given type first. */
#define min_t(type, x, y) \
__min(type, type, \
__UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \
x, y)
  56. #define preemptible() (1)
/* Highmem mapping does not exist in this stub; must never be called. */
static inline void *kmap(struct page *page)
{
	assert(0);
	return NULL;
}
/* Atomic highmem mapping is likewise unsupported; abort if reached. */
static inline void *kmap_atomic(struct page *page)
{
	assert(0);
	return NULL;
}
/* Counterpart to kmap(); unreachable in userspace, so abort. */
static inline void kunmap(void *addr)
{
	assert(0);
}
/* Counterpart to kmap_atomic(); unreachable in userspace, so abort. */
static inline void kunmap_atomic(void *addr)
{
	assert(0);
}
  75. static inline unsigned long __get_free_page(unsigned int flags)
  76. {
  77. return (unsigned long)malloc(PAGE_SIZE);
  78. }
  79. static inline void free_page(unsigned long page)
  80. {
  81. free((void *)page);
  82. }
  83. static inline void *kmalloc(unsigned int size, unsigned int flags)
  84. {
  85. return malloc(size);
  86. }
#define kfree(x) free(x)
/* kmemleak instrumentation is compiled out in userspace. */
#define kmemleak_alloc(a, b, c, d)
#define kmemleak_free(a)
/* No slab allocator here, so no page is ever a slab page. */
#define PageSlab(p) (0)
/* Cache flushing is a no-op in userspace. */
#define flush_kernel_dcache_page(p)
#endif