mman.h

#ifndef _LINUX_MMAN_H
#define _LINUX_MMAN_H

#include <asm/mman.h>

#define MREMAP_MAYMOVE 1
#define MREMAP_FIXED   2
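
/*
 * Overcommit policy values for the vm.overcommit_memory sysctl
 * (/proc/sys/vm/overcommit_memory), read by the kernel through
 * sysctl_overcommit_memory below: heuristic overcommit, always
 * overcommit, and strict commit accounting respectively.
 */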
#define OVERCOMMIT_GUESS  0
#define OVERCOMMIT_ALWAYS 1
#define OVERCOMMIT_NEVER  2

#ifdef __KERNEL__

#include <linux/mm.h>

#include <asm/atomic.h>

extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
extern atomic_long_t vm_committed_space;

#ifdef CONFIG_SMP
extern void vm_acct_memory(long pages);
#else
static inline void vm_acct_memory(long pages)
{
        atomic_long_add(pages, &vm_committed_space);
}
#endif

static inline void vm_unacct_memory(long pages)
{
        vm_acct_memory(-pages);
}
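
/*
 * Illustrative sketch, not part of the original header: how the pair above
 * is typically used.  A caller charges its pages against vm_committed_space
 * up front and rolls the charge back if the operation cannot proceed.  The
 * function name and the policy check below are hypothetical placeholders;
 * real callers live in the core mm code (e.g. the overcommit checks).
 */
#if 0
static int charge_committed_pages(long npages)
{
        vm_acct_memory(npages);                 /* charge the commitment */

        if (commit_limit_exceeded()) {          /* hypothetical policy check */
                vm_unacct_memory(npages);       /* undo the charge */
                return -ENOMEM;
        }
        return 0;
}
#endif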

/*
 * Allow architectures to handle additional protection bits
 */

#ifndef arch_calc_vm_prot_bits
#define arch_calc_vm_prot_bits(prot) 0
#endif

#ifndef arch_vm_get_page_prot
#define arch_vm_get_page_prot(vm_flags) __pgprot(0)
#endif
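
/*
 * Sketch of an architecture override, not part of the original header: an
 * architecture that supports an extra mmap protection bit can define the
 * hook before this point (typically in its own <asm/mman.h>, included at
 * the top of this file) so the empty defaults above are skipped.
 * PROT_FOO and VM_FOO are hypothetical names used only for illustration.
 */
#if 0
#define arch_calc_vm_prot_bits(prot) \
        (((prot) & PROT_FOO) ? VM_FOO : 0)
#endif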

#ifndef arch_validate_prot
/*
 * This is called from mprotect().  PROT_GROWSDOWN and PROT_GROWSUP have
 * already been masked out.
 *
 * Returns true if the prot flags are valid
 */
static inline int arch_validate_prot(unsigned long prot)
{
        return (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM)) == 0;
}
#define arch_validate_prot arch_validate_prot
#endif
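
/*
 * Worked example of the default check, assuming the common encodings
 * PROT_READ = 0x1, PROT_WRITE = 0x2, PROT_EXEC = 0x4, PROT_SEM = 0x8:
 *
 *      arch_validate_prot(PROT_READ | PROT_WRITE)  -> 1  (only known bits)
 *      arch_validate_prot(PROT_NONE)               -> 1  (no bits set at all)
 *      arch_validate_prot(PROT_READ | 0x10)        -> 0  (unknown bit 0x10)
 */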

/*
 * Optimisation macro.  It is equivalent to:
 *   (x & bit1) ? bit2 : 0
 * but this version is faster.
 * ("bit1" and "bit2" must be single bits)
 */
#define _calc_vm_trans(x, bit1, bit2) \
        ((bit1) <= (bit2) ? ((x) & (bit1)) * ((bit2) / (bit1)) \
                          : ((x) & (bit1)) / ((bit1) / (bit2)))
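
/*
 * Worked example, not part of the original header, using illustrative bit
 * values: translating bit 0x1 in "x" to bit 0x8 in the result.
 *
 *   _calc_vm_trans(x, 0x1, 0x8)
 *       = (x & 0x1) * (0x8 / 0x1)      since 0x1 <= 0x8
 *       = (x & 0x1) * 8                -> 0x8 if the bit is set, else 0
 *
 * Translating the other way, bit 0x8 to bit 0x1, takes the division branch:
 *
 *   _calc_vm_trans(x, 0x8, 0x1)
 *       = (x & 0x8) / (0x8 / 0x1)
 *       = (x & 0x8) / 8                -> 0x1 if the bit is set, else 0
 *
 * Both forms avoid the conditional branch of "(x & bit1) ? bit2 : 0"; the
 * multiply/divide by a power of two can be compiled to a shift.
 */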

/*
 * Combine the mmap "prot" argument into "vm_flags" used internally.
 */
static inline unsigned long
calc_vm_prot_bits(unsigned long prot)
{
        return _calc_vm_trans(prot, PROT_READ,  VM_READ ) |
               _calc_vm_trans(prot, PROT_WRITE, VM_WRITE) |
               _calc_vm_trans(prot, PROT_EXEC,  VM_EXEC ) |
               arch_calc_vm_prot_bits(prot);
}

/*
 * Combine the mmap "flags" argument into "vm_flags" used internally.
 */
static inline unsigned long
calc_vm_flag_bits(unsigned long flags)
{
        return _calc_vm_trans(flags, MAP_GROWSDOWN,  VM_GROWSDOWN ) |
               _calc_vm_trans(flags, MAP_DENYWRITE,  VM_DENYWRITE ) |
               _calc_vm_trans(flags, MAP_EXECUTABLE, VM_EXECUTABLE) |
               _calc_vm_trans(flags, MAP_LOCKED,     VM_LOCKED    );
}
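
/*
 * Illustrative sketch, not part of the original header: roughly how an
 * mmap() implementation combines the two helpers above into one vm_flags
 * word, OR-ing in the per-mm default flags and the VM_MAY* capability
 * bits.  The exact call site in mm/mmap.c may differ between kernel
 * versions; this only shows the intended use of the helpers.
 */
#if 0
        unsigned long vm_flags;

        vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
                   mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
#endif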

#endif /* __KERNEL__ */
#endif /* _LINUX_MMAN_H */