cmpxchg.c

/*
 * Copyright (C) 2017 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/bitops.h>
#include <asm/cmpxchg.h>
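
/*
 * Both helpers below emulate an atomic exchange or compare-and-exchange on a
 * 1- or 2-byte quantity by performing a 32-bit cmpxchg() on the naturally
 * aligned word that contains it, modifying only the bytes of interest.
 */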

unsigned long __xchg_small(volatile void *ptr, unsigned long val, unsigned int size)
{
	u32 old32, new32, load32, mask;
	volatile u32 *ptr32;
	unsigned int shift;

	/* Check that ptr is naturally aligned */
	WARN_ON((unsigned long)ptr & (size - 1));

	/* Mask value to the correct size. */
	mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
	val &= mask;

	/*
	 * Calculate a shift & mask that correspond to the value we wish to
	 * exchange within the naturally aligned 4 byte integer that includes
	 * it.
	 */
	shift = (unsigned long)ptr & 0x3;
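	/*
	 * On a big-endian kernel the value of interest occupies the opposite
	 * end of the aligned 32-bit word, so mirror the byte offset before
	 * converting it to a bit shift.
	 */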
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		shift ^= sizeof(u32) - size;
	shift *= BITS_PER_BYTE;
	mask <<= shift;

	/*
	 * Calculate a pointer to the naturally aligned 4 byte integer that
	 * includes our byte of interest, and load its value.
	 */
	ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
	load32 = *ptr32;
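
	/*
	 * The 32-bit cmpxchg() only succeeds if no other CPU modified any
	 * byte of the word since load32 was read; on failure it returns the
	 * word's current value, which is fed back into the next attempt.
	 */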
	do {
		old32 = load32;
		new32 = (load32 & ~mask) | (val << shift);
		load32 = cmpxchg(ptr32, old32, new32);
	} while (load32 != old32);

	return (load32 & mask) >> shift;
}
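
/*
 * Usage sketch (illustrative): callers are not expected to invoke
 * __xchg_small() directly; the generic xchg() macro in asm/cmpxchg.h should
 * dispatch 1- and 2-byte operands here, along the lines of:
 *
 *	u8 flag = 0;
 *	u8 prev = xchg(&flag, 1);   (ends up as __xchg_small(&flag, 1, 1))
 *
 * prev then holds the value the byte contained immediately before the
 * exchange.
 */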

unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
			      unsigned long new, unsigned int size)
{
	u32 mask, old32, new32, load32;
	volatile u32 *ptr32;
	unsigned int shift;
	u32 load;	/* full width so a 2-byte value is not truncated below */

	/* Check that ptr is naturally aligned */
	WARN_ON((unsigned long)ptr & (size - 1));

	/* Mask inputs to the correct size. */
	mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
	old &= mask;
	new &= mask;

	/*
	 * Calculate a shift & mask that correspond to the value we wish to
	 * compare & exchange within the naturally aligned 4 byte integer
	 * that includes it.
	 */
	shift = (unsigned long)ptr & 0x3;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		shift ^= sizeof(u32) - size;
	shift *= BITS_PER_BYTE;
	mask <<= shift;

	/*
	 * Calculate a pointer to the naturally aligned 4 byte integer that
	 * includes our byte of interest, and load its value.
	 */
	ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
	load32 = *ptr32;
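
	/*
	 * Unlike the do/while loop in __xchg_small(), this loop may need to
	 * bail out before storing anything: if the current value no longer
	 * matches the expected old value, it is returned as-is and no
	 * cmpxchg() is attempted.
	 */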
	while (true) {
		/*
		 * Ensure the byte we want to exchange matches the expected
		 * old value, and if not then bail.
		 */
		load = (load32 & mask) >> shift;
		if (load != old)
			return load;

		/*
		 * Calculate the old & new values of the naturally aligned
		 * 4 byte integer that include the byte we want to exchange.
		 * Attempt to exchange the old value for the new value, and
		 * return if we succeed.
		 */
		old32 = (load32 & ~mask) | (old << shift);
		new32 = (load32 & ~mask) | (new << shift);
		load32 = cmpxchg(ptr32, old32, new32);
		if (load32 == old32)
			return old;
	}
}
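
/*
 * Usage sketch (illustrative): as above, the generic cmpxchg() macro should
 * route 1- and 2-byte operands here, along the lines of:
 *
 *	u16 seq = 5;
 *	u16 prev = cmpxchg(&seq, 5, 6);   (ends up as __cmpxchg_small(&seq, 5, 6, 2))
 *
 * prev == 5 means the new value was stored; any other value means the
 * comparison failed and seq was left unchanged.
 */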