/*
 * bitops.h - generic bit-manipulation helpers (macros and inline functions).
 */
  1. #ifndef _LINUX_BITOPS_H
  2. #define _LINUX_BITOPS_H
  3. #include <asm/types.h>
  4. #ifdef __KERNEL__
  5. #define BIT(nr) (1UL << (nr))
  6. #define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
  7. #define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
  8. #define BITS_PER_BYTE 8
  9. #define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
  10. #endif
  11. /*
  12. * Include this here because some architectures need generic_ffs/fls in
  13. * scope
  14. */
  15. #include <asm/bitops.h>
  16. #define for_each_set_bit(bit, addr, size) \
  17. for ((bit) = find_first_bit((addr), (size)); \
  18. (bit) < (size); \
  19. (bit) = find_next_bit((addr), (size), (bit) + 1))
  20. /* Temporary */
  21. #define for_each_bit(bit, addr, size) for_each_set_bit(bit, addr, size)
  22. static __inline__ int get_bitmask_order(unsigned int count)
  23. {
  24. int order;
  25. order = fls(count);
  26. return order; /* We could be slightly more clever with -1 here... */
  27. }
  28. static __inline__ int get_count_order(unsigned int count)
  29. {
  30. int order;
  31. order = fls(count) - 1;
  32. if (count & (count - 1))
  33. order++;
  34. return order;
  35. }
  36. static inline unsigned long hweight_long(unsigned long w)
  37. {
  38. return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
  39. }
  40. /*
  41. * Clearly slow versions of the hweightN() functions, their benefit is
  42. * of course compile time evaluation of constant arguments.
  43. */
  44. #define HWEIGHT8(w) \
  45. ( BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + \
  46. (!!((w) & (1ULL << 0))) + \
  47. (!!((w) & (1ULL << 1))) + \
  48. (!!((w) & (1ULL << 2))) + \
  49. (!!((w) & (1ULL << 3))) + \
  50. (!!((w) & (1ULL << 4))) + \
  51. (!!((w) & (1ULL << 5))) + \
  52. (!!((w) & (1ULL << 6))) + \
  53. (!!((w) & (1ULL << 7))) )
  54. #define HWEIGHT16(w) (HWEIGHT8(w) + HWEIGHT8((w) >> 8))
  55. #define HWEIGHT32(w) (HWEIGHT16(w) + HWEIGHT16((w) >> 16))
  56. #define HWEIGHT64(w) (HWEIGHT32(w) + HWEIGHT32((w) >> 32))
  57. /*
  58. * Type invariant version that simply casts things to the
  59. * largest type.
  60. */
  61. #define HWEIGHT(w) HWEIGHT64((u64)(w))
  62. /**
  63. * rol32 - rotate a 32-bit value left
  64. * @word: value to rotate
  65. * @shift: bits to roll
  66. */
  67. static inline __u32 rol32(__u32 word, unsigned int shift)
  68. {
  69. return (word << shift) | (word >> (32 - shift));
  70. }
  71. /**
  72. * ror32 - rotate a 32-bit value right
  73. * @word: value to rotate
  74. * @shift: bits to roll
  75. */
  76. static inline __u32 ror32(__u32 word, unsigned int shift)
  77. {
  78. return (word >> shift) | (word << (32 - shift));
  79. }
  80. /**
  81. * rol16 - rotate a 16-bit value left
  82. * @word: value to rotate
  83. * @shift: bits to roll
  84. */
  85. static inline __u16 rol16(__u16 word, unsigned int shift)
  86. {
  87. return (word << shift) | (word >> (16 - shift));
  88. }
  89. /**
  90. * ror16 - rotate a 16-bit value right
  91. * @word: value to rotate
  92. * @shift: bits to roll
  93. */
  94. static inline __u16 ror16(__u16 word, unsigned int shift)
  95. {
  96. return (word >> shift) | (word << (16 - shift));
  97. }
  98. /**
  99. * rol8 - rotate an 8-bit value left
  100. * @word: value to rotate
  101. * @shift: bits to roll
  102. */
  103. static inline __u8 rol8(__u8 word, unsigned int shift)
  104. {
  105. return (word << shift) | (word >> (8 - shift));
  106. }
  107. /**
  108. * ror8 - rotate an 8-bit value right
  109. * @word: value to rotate
  110. * @shift: bits to roll
  111. */
  112. static inline __u8 ror8(__u8 word, unsigned int shift)
  113. {
  114. return (word >> shift) | (word << (8 - shift));
  115. }
  116. static inline unsigned fls_long(unsigned long l)
  117. {
  118. if (sizeof(l) == 4)
  119. return fls(l);
  120. return fls64(l);
  121. }
  122. /**
  123. * __ffs64 - find first set bit in a 64 bit word
  124. * @word: The 64 bit word
  125. *
  126. * On 64 bit arches this is a synomyn for __ffs
  127. * The result is not defined if no bits are set, so check that @word
  128. * is non-zero before calling this.
  129. */
  130. static inline unsigned long __ffs64(u64 word)
  131. {
  132. #if BITS_PER_LONG == 32
  133. if (((u32)word) == 0UL)
  134. return __ffs((u32)(word >> 32)) + 32;
  135. #elif BITS_PER_LONG != 64
  136. #error BITS_PER_LONG not 32 or 64
  137. #endif
  138. return __ffs((unsigned long)word);
  139. }
  140. #ifdef __KERNEL__
  141. #ifdef CONFIG_GENERIC_FIND_FIRST_BIT
  142. /**
  143. * find_first_bit - find the first set bit in a memory region
  144. * @addr: The address to start the search at
  145. * @size: The maximum size to search
  146. *
  147. * Returns the bit number of the first set bit.
  148. */
  149. extern unsigned long find_first_bit(const unsigned long *addr,
  150. unsigned long size);
  151. /**
  152. * find_first_zero_bit - find the first cleared bit in a memory region
  153. * @addr: The address to start the search at
  154. * @size: The maximum size to search
  155. *
  156. * Returns the bit number of the first cleared bit.
  157. */
  158. extern unsigned long find_first_zero_bit(const unsigned long *addr,
  159. unsigned long size);
  160. #endif /* CONFIG_GENERIC_FIND_FIRST_BIT */
  161. #ifdef CONFIG_GENERIC_FIND_LAST_BIT
  162. /**
  163. * find_last_bit - find the last set bit in a memory region
  164. * @addr: The address to start the search at
  165. * @size: The maximum size to search
  166. *
  167. * Returns the bit number of the first set bit, or size.
  168. */
  169. extern unsigned long find_last_bit(const unsigned long *addr,
  170. unsigned long size);
  171. #endif /* CONFIG_GENERIC_FIND_LAST_BIT */
  172. #ifdef CONFIG_GENERIC_FIND_NEXT_BIT
  173. /**
  174. * find_next_bit - find the next set bit in a memory region
  175. * @addr: The address to base the search on
  176. * @offset: The bitnumber to start searching at
  177. * @size: The bitmap size in bits
  178. */
  179. extern unsigned long find_next_bit(const unsigned long *addr,
  180. unsigned long size, unsigned long offset);
  181. /**
  182. * find_next_zero_bit - find the next cleared bit in a memory region
  183. * @addr: The address to base the search on
  184. * @offset: The bitnumber to start searching at
  185. * @size: The bitmap size in bits
  186. */
  187. extern unsigned long find_next_zero_bit(const unsigned long *addr,
  188. unsigned long size,
  189. unsigned long offset);
  190. #endif /* CONFIG_GENERIC_FIND_NEXT_BIT */
  191. #endif /* __KERNEL__ */
  192. #endif