math64.h

#ifndef _LINUX_MATH64_H
#define _LINUX_MATH64_H

#include <linux/types.h>
#include <asm/div64.h>

#if BITS_PER_LONG == 64

/**
 * div_u64_rem - unsigned 64bit divide with 32bit divisor, returning remainder
 *
 * This is commonly provided by 32bit archs as an optimized 64bit divide.
 */
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}
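
/*
 * Illustrative usage (not part of the original header): splitting a
 * nanosecond count into whole seconds plus leftover nanoseconds.
 * NSEC_PER_SEC is assumed visible from the kernel's time headers:
 *
 *	u32 rem_ns;
 *	u64 secs = div_u64_rem(ns, NSEC_PER_SEC, &rem_ns);
 */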

/**
 * div_s64_rem - signed 64bit divide with 32bit divisor, returning remainder
 */
static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

/**
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 */
static inline u64 div64_u64(u64 dividend, u64 divisor)
{
	return dividend / divisor;
}

/**
 * div64_s64 - signed 64bit divide with 64bit divisor
 */
static inline s64 div64_s64(s64 dividend, s64 divisor)
{
	return dividend / divisor;
}
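
/*
 * Illustrative usage (not part of the original header): this variant is for
 * when both operands are genuinely 64bit, e.g. averaging a 64bit running
 * total over a 64bit sample count:
 *
 *	u64 avg = div64_u64(total_ns, nr_samples);
 */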

#elif BITS_PER_LONG == 32

#ifndef div_u64_rem
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	/* do_div() divides the 64bit dividend in place and returns the remainder */
	*remainder = do_div(dividend, divisor);
	return dividend;
}
#endif

#ifndef div_s64_rem
extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
#endif

#ifndef div64_u64
extern u64 div64_u64(u64 dividend, u64 divisor);
#endif

#ifndef div64_s64
extern s64 div64_s64(s64 dividend, s64 divisor);
#endif

#endif /* BITS_PER_LONG */

/**
 * div_u64 - unsigned 64bit divide with 32bit divisor
 *
 * This is the most common 64bit divide and should be used if possible,
 * as many 32bit archs can optimize this variant better than a full
 * 64bit divide.
 */
#ifndef div_u64
static inline u64 div_u64(u64 dividend, u32 divisor)
{
	u32 remainder;
	return div_u64_rem(dividend, divisor, &remainder);
}
#endif
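
/*
 * Illustrative usage (not part of the original header): prefer this variant
 * whenever the divisor fits in 32 bits, e.g. converting microseconds to
 * milliseconds:
 *
 *	u64 ms = div_u64(us, 1000);
 */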

/**
 * div_s64 - signed 64bit divide with 32bit divisor
 */
#ifndef div_s64
static inline s64 div_s64(s64 dividend, s32 divisor)
{
	s32 remainder;
	return div_s64_rem(dividend, divisor, &remainder);
}
#endif

u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);

static __always_inline u32
__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
	u32 ret = 0;

	while (dividend >= divisor) {
		/* The following asm() prevents the compiler from
		   optimising this loop into a modulo operation. */
		asm("" : "+rm"(dividend));
		dividend -= divisor;
		ret++;
	}

	*remainder = dividend;

	return ret;
}
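
/*
 * Illustrative usage (not part of the original header): the subtraction
 * loop above only beats a real division when the quotient is expected to
 * be tiny, e.g. carrying a just-overflowed nanosecond field into seconds.
 * NSEC_PER_SEC is assumed visible here:
 *
 *	u64 ns_rem;
 *	ts.tv_sec += __iter_div_u64_rem(ts.tv_nsec + delta_ns,
 *					NSEC_PER_SEC, &ns_rem);
 *	ts.tv_nsec = ns_rem;
 */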

#endif /* _LINUX_MATH64_H */