bitops.h 6.6 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BITOPS_H
#define _LINUX_BITOPS_H

#include <asm/types.h>
#include <linux/bits.h>

/* Width, in bits, of a value of the given type. */
#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
/* Number of unsigned longs needed to hold a bitmap of @nr bits. */
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_TYPE(long))

/*
 * Generic software implementations of the hweight (population count)
 * operations, for architectures without a hardware popcount.
 */
extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
extern unsigned int __sw_hweight32(unsigned int w);
extern unsigned long __sw_hweight64(__u64 w);

/*
 * Include this here because some architectures need generic_ffs/fls in
 * scope
 */
#include <asm/bitops.h>
/**
 * for_each_set_bit - iterate over every set bit in a bitmap
 * @bit: unsigned loop cursor; holds the current bit index
 * @addr: the bitmap to search
 * @size: the bitmap size in bits
 */
#define for_each_set_bit(bit, addr, size) \
	for ((bit) = find_first_bit((addr), (size)); \
	     (bit) < (size); \
	     (bit) = find_next_bit((addr), (size), (bit) + 1))

/* same as for_each_set_bit() but use bit as value to start with */
#define for_each_set_bit_from(bit, addr, size) \
	for ((bit) = find_next_bit((addr), (size), (bit)); \
	     (bit) < (size); \
	     (bit) = find_next_bit((addr), (size), (bit) + 1))

/**
 * for_each_clear_bit - iterate over every clear bit in a bitmap
 * @bit: unsigned loop cursor; holds the current bit index
 * @addr: the bitmap to search
 * @size: the bitmap size in bits
 */
#define for_each_clear_bit(bit, addr, size) \
	for ((bit) = find_first_zero_bit((addr), (size)); \
	     (bit) < (size); \
	     (bit) = find_next_zero_bit((addr), (size), (bit) + 1))

/* same as for_each_clear_bit() but use bit as value to start with */
#define for_each_clear_bit_from(bit, addr, size) \
	for ((bit) = find_next_zero_bit((addr), (size), (bit)); \
	     (bit) < (size); \
	     (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
  35. static inline int get_bitmask_order(unsigned int count)
  36. {
  37. int order;
  38. order = fls(count);
  39. return order; /* We could be slightly more clever with -1 here... */
  40. }
  41. static __always_inline unsigned long hweight_long(unsigned long w)
  42. {
  43. return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
  44. }
  45. /**
  46. * rol64 - rotate a 64-bit value left
  47. * @word: value to rotate
  48. * @shift: bits to roll
  49. */
  50. static inline __u64 rol64(__u64 word, unsigned int shift)
  51. {
  52. return (word << shift) | (word >> (64 - shift));
  53. }
  54. /**
  55. * ror64 - rotate a 64-bit value right
  56. * @word: value to rotate
  57. * @shift: bits to roll
  58. */
  59. static inline __u64 ror64(__u64 word, unsigned int shift)
  60. {
  61. return (word >> shift) | (word << (64 - shift));
  62. }
  63. /**
  64. * rol32 - rotate a 32-bit value left
  65. * @word: value to rotate
  66. * @shift: bits to roll
  67. */
  68. static inline __u32 rol32(__u32 word, unsigned int shift)
  69. {
  70. return (word << shift) | (word >> ((-shift) & 31));
  71. }
  72. /**
  73. * ror32 - rotate a 32-bit value right
  74. * @word: value to rotate
  75. * @shift: bits to roll
  76. */
  77. static inline __u32 ror32(__u32 word, unsigned int shift)
  78. {
  79. return (word >> shift) | (word << (32 - shift));
  80. }
  81. /**
  82. * rol16 - rotate a 16-bit value left
  83. * @word: value to rotate
  84. * @shift: bits to roll
  85. */
  86. static inline __u16 rol16(__u16 word, unsigned int shift)
  87. {
  88. return (word << shift) | (word >> (16 - shift));
  89. }
  90. /**
  91. * ror16 - rotate a 16-bit value right
  92. * @word: value to rotate
  93. * @shift: bits to roll
  94. */
  95. static inline __u16 ror16(__u16 word, unsigned int shift)
  96. {
  97. return (word >> shift) | (word << (16 - shift));
  98. }
  99. /**
  100. * rol8 - rotate an 8-bit value left
  101. * @word: value to rotate
  102. * @shift: bits to roll
  103. */
  104. static inline __u8 rol8(__u8 word, unsigned int shift)
  105. {
  106. return (word << shift) | (word >> (8 - shift));
  107. }
  108. /**
  109. * ror8 - rotate an 8-bit value right
  110. * @word: value to rotate
  111. * @shift: bits to roll
  112. */
  113. static inline __u8 ror8(__u8 word, unsigned int shift)
  114. {
  115. return (word >> shift) | (word << (8 - shift));
  116. }
  117. /**
  118. * sign_extend32 - sign extend a 32-bit value using specified bit as sign-bit
  119. * @value: value to sign extend
  120. * @index: 0 based bit index (0<=index<32) to sign bit
  121. *
  122. * This is safe to use for 16- and 8-bit types as well.
  123. */
  124. static inline __s32 sign_extend32(__u32 value, int index)
  125. {
  126. __u8 shift = 31 - index;
  127. return (__s32)(value << shift) >> shift;
  128. }
  129. /**
  130. * sign_extend64 - sign extend a 64-bit value using specified bit as sign-bit
  131. * @value: value to sign extend
  132. * @index: 0 based bit index (0<=index<64) to sign bit
  133. */
  134. static inline __s64 sign_extend64(__u64 value, int index)
  135. {
  136. __u8 shift = 63 - index;
  137. return (__s64)(value << shift) >> shift;
  138. }
  139. static inline unsigned fls_long(unsigned long l)
  140. {
  141. if (sizeof(l) == 4)
  142. return fls(l);
  143. return fls64(l);
  144. }
  145. static inline int get_count_order(unsigned int count)
  146. {
  147. int order;
  148. order = fls(count) - 1;
  149. if (count & (count - 1))
  150. order++;
  151. return order;
  152. }
  153. /**
  154. * get_count_order_long - get order after rounding @l up to power of 2
  155. * @l: parameter
  156. *
  157. * it is same as get_count_order() but with long type parameter
  158. */
  159. static inline int get_count_order_long(unsigned long l)
  160. {
  161. if (l == 0UL)
  162. return -1;
  163. else if (l & (l - 1UL))
  164. return (int)fls_long(l);
  165. else
  166. return (int)fls_long(l) - 1;
  167. }
/**
 * __ffs64 - find first set bit in a 64 bit word
 * @word: The 64 bit word
 *
 * On 64 bit arches this is a synonym for __ffs
 * The result is not defined if no bits are set, so check that @word
 * is non-zero before calling this.
 */
static inline unsigned long __ffs64(u64 word)
{
#if BITS_PER_LONG == 32
	/* Low half empty: the first set bit must be in the upper 32 bits. */
	if (((u32)word) == 0UL)
		return __ffs((u32)(word >> 32)) + 32;
#elif BITS_PER_LONG != 64
#error BITS_PER_LONG not 32 or 64
#endif
	return __ffs((unsigned long)word);
}
  186. /**
  187. * assign_bit - Assign value to a bit in memory
  188. * @nr: the bit to set
  189. * @addr: the address to start counting from
  190. * @value: the value to assign
  191. */
  192. static __always_inline void assign_bit(long nr, volatile unsigned long *addr,
  193. bool value)
  194. {
  195. if (value)
  196. set_bit(nr, addr);
  197. else
  198. clear_bit(nr, addr);
  199. }
  200. static __always_inline void __assign_bit(long nr, volatile unsigned long *addr,
  201. bool value)
  202. {
  203. if (value)
  204. __set_bit(nr, addr);
  205. else
  206. __clear_bit(nr, addr);
  207. }
#ifdef __KERNEL__

#ifndef set_mask_bits
/*
 * set_mask_bits - clear the @mask bits and set the @bits bits in *@ptr
 *
 * Retries with cmpxchg() until the read-modify-write succeeds without
 * interference; evaluates to the new value of *@ptr.
 */
#define set_mask_bits(ptr, mask, bits) \
({ \
	const typeof(*(ptr)) mask__ = (mask), bits__ = (bits); \
	typeof(*(ptr)) old__, new__; \
	\
	do { \
		old__ = READ_ONCE(*(ptr)); \
		new__ = (old__ & ~mask__) | bits__; \
	} while (cmpxchg(ptr, old__, new__) != old__); \
	\
	new__; \
})
#endif

#ifndef bit_clear_unless
/*
 * bit_clear_unless - clear the @clear bits in *@ptr unless any @test
 * bits are set
 *
 * Retries with cmpxchg() until the update succeeds or a @test bit is
 * observed; evaluates to true when the bits were cleared (no @test bit
 * was set in the value last read).
 */
#define bit_clear_unless(ptr, clear, test) \
({ \
	const typeof(*(ptr)) clear__ = (clear), test__ = (test);\
	typeof(*(ptr)) old__, new__; \
	\
	do { \
		old__ = READ_ONCE(*(ptr)); \
		new__ = old__ & ~clear__; \
	} while (!(old__ & test__) && \
		 cmpxchg(ptr, old__, new__) != old__); \
	\
	!(old__ & test__); \
})
#endif
#ifndef find_last_bit
/**
 * find_last_bit - find the last set bit in a memory region
 * @addr: The address to start the search at
 * @size: The number of bits to search
 *
 * Returns the bit number of the last set bit, or size.
 */
extern unsigned long find_last_bit(const unsigned long *addr,
				   unsigned long size);
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_BITOPS_H */