
arch/sparc: Introduce cmpxchg_u8 SPARC

SPARC currently supports 32-bit and 64-bit cmpxchg. Add support
for 8-bit (1-byte) cmpxchg. This is required by the queued rwlocks
feature, which uses a 1-byte cmpxchg.

The function __cmpxchg_u8 here uses the 4-byte cas instruction with
byte manipulation to achieve a 1-byte cmpxchg.
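
As a standalone illustration (not part of the patch), the shift and
mask derivation can be checked with a few lines of host C. The small
program below is a hypothetical sketch; it only mirrors SPARC's
big-endian byte numbering, where byte offset 0 is the most
significant byte of the word:

	#include <stdio.h>

	int main(void)
	{
		for (unsigned long addr = 0; addr < 4; addr++) {
			/* same math as in __cmpxchg_u8 below */
			int bit_shift = ((addr & 3) ^ 3) << 3;
			unsigned int mask = 0xffu << bit_shift;

			printf("offset %lu: shift %2d, mask 0x%08x\n",
			       addr, bit_shift, mask);
		}
		return 0;
	}

Running it prints shifts 24/16/8/0 and masks 0xff000000 through
0x000000ff for byte offsets 0 through 3, i.e. byte 0 occupies the
most significant byte lane, as big-endian SPARC expects.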

Signed-off-by: Babu Moger <babu.moger@oracle.com>
Reviewed-by: Håkon Bugge <haakon.bugge@oracle.com>
Reviewed-by: Steve Sistare <steven.sistare@oracle.com>
Reviewed-by: Shannon Nelson <shannon.nelson@oracle.com>
Reviewed-by: Jane Chu <jane.chu@oracle.com>
Reviewed-by: Vijay Kumar <vijay.ac.kumar@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit a12ee23493
1 changed file with 29 additions and 0 deletions

arch/sparc/include/asm/cmpxchg_64.h

@@ -87,6 +87,33 @@ __cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
 	return new;
 }
 
+/*
+ * Use a 4 byte cas instruction to achieve a 1 byte cmpxchg. The main
+ * logic is to compute the bit shift of the byte of interest. The XOR
+ * flips the byte offset within the word for big-endian byte order.
+ */
+static inline unsigned long
+__cmpxchg_u8(volatile unsigned char *m, unsigned char old, unsigned char new)
+{
+	unsigned long maddr = (unsigned long)m;
+	int bit_shift = (((unsigned long)m & 3) ^ 3) << 3;
+	unsigned int mask = 0xff << bit_shift;
+	unsigned int *ptr = (unsigned int *) (maddr & ~3);
+	unsigned int old32, new32, load;
+	unsigned int load32 = *ptr;
+
+	do {
+		new32 = (load32 & ~mask) | (new << bit_shift);
+		old32 = (load32 & ~mask) | (old << bit_shift);
+		load32 = __cmpxchg_u32(ptr, old32, new32);
+		if (load32 == old32)
+			return old;
+		load = (load32 & mask) >> bit_shift;
+	} while (load == old);
+
+	return load;
+}
+
 /* This function doesn't exist, so you'll get a linker error
    if something tries to do an invalid cmpxchg().  */
 void __cmpxchg_called_with_bad_pointer(void);
@@ -95,6 +122,8 @@ static inline unsigned long
 __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
 {
 	switch (size) {
+		case 1:
+			return __cmpxchg_u8(ptr, old, new);
 		case 4:
 			return __cmpxchg_u32(ptr, old, new);
 		case 8:
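
Beyond the diff itself, the retry loop in __cmpxchg_u8 can be
mimicked in portable C. The sketch below is hypothetical: cas_u8 and
cas_u32 are made-up names, GCC/Clang __atomic builtins stand in for
the SPARC cas instruction, and the byte-lane math matches SPARC only
on a big-endian host:

	#include <stdint.h>

	/* returns the value observed at *p, like the SPARC cas instruction */
	static uint32_t cas_u32(uint32_t *p, uint32_t old, uint32_t new)
	{
		__atomic_compare_exchange_n(p, &old, new, 0,
					    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
		return old;	/* on failure, the builtin wrote the observed value here */
	}

	static unsigned char cas_u8(volatile unsigned char *m,
				    unsigned char old, unsigned char new)
	{
		uintptr_t maddr = (uintptr_t)m;
		int bit_shift = ((maddr & 3) ^ 3) << 3;	/* big-endian byte lane */
		uint32_t mask = 0xffu << bit_shift;
		uint32_t *ptr = (uint32_t *)(maddr & ~(uintptr_t)3);
		uint32_t old32, new32, load32 = *ptr;
		unsigned char load;

		do {
			/* splice old/new bytes into the containing word */
			new32 = (load32 & ~mask) | ((uint32_t)new << bit_shift);
			old32 = (load32 & ~mask) | ((uint32_t)old << bit_shift);
			load32 = cas_u32(ptr, old32, new32);
			if (load32 == old32)
				return old;	/* byte matched and was swapped */
			load = (load32 & mask) >> bit_shift;
		} while (load == old);	/* a neighboring byte changed: retry */

		return load;	/* target byte differed: report what was seen */
	}

The loop retries only when some other byte in the same word changed
underneath us; if the target byte itself differs from old, the
mismatching value is returned so the caller sees an ordinary cmpxchg
failure.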