author		Babu Moger <babu.moger@oracle.com>	2017-05-24 17:55:12 -0600
committer	David S. Miller <davem@davemloft.net>	2017-05-25 12:06:51 -0700
commit		a12ee2349312d7112b9b7c6ac2e70c5ec2ca334e (patch)
tree		f05689b6e06bbf9c03499ce2ad8728546a0b54cc
parent		97d9f969161d79e6a4bba247e67ce731ff861f79 (diff)
download	op-kernel-dev-a12ee2349312d7112b9b7c6ac2e70c5ec2ca334e.zip
		op-kernel-dev-a12ee2349312d7112b9b7c6ac2e70c5ec2ca334e.tar.gz
arch/sparc: Introduce cmpxchg_u8 for SPARC
SPARC currently supports 32-bit and 64-bit cmpxchg. Add support for 8-bit (1-byte) cmpxchg, which is required by the queued rwlocks feature, since it performs a 1-byte cmpxchg. The new function __cmpxchg_u8 uses the 4-byte cas instruction together with byte manipulation to achieve a 1-byte cmpxchg.

Signed-off-by: Babu Moger <babu.moger@oracle.com>
Reviewed-by: Håkon Bugge <haakon.bugge@oracle.com>
Reviewed-by: Steve Sistare <steven.sistare@oracle.com>
Reviewed-by: Shannon Nelson <shannon.nelson@oracle.com>
Reviewed-by: Jane Chu <jane.chu@oracle.com>
Reviewed-by: Vijay Kumar <vijay.ac.kumar@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	arch/sparc/include/asm/cmpxchg_64.h	29
1 file changed, 29 insertions, 0 deletions
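Before reading the diff, it may help to see the byte-selection arithmetic in isolation. The following is a minimal, hypothetical userspace sketch (not part of the patch): it prints the bit_shift and mask that __cmpxchg_u8 computes for each byte offset within an aligned 32-bit word, assuming the big-endian byte order the patch comment refers to.

#include <stdio.h>

/*
 * For a byte at address m, the enclosing aligned 32-bit word lives at
 * (m & ~3) and the byte sits at offset (m & 3) within it.  On a
 * big-endian machine offset 0 is the most significant byte, so the
 * offset is reversed with XOR 3 before being turned into a bit shift.
 */
int main(void)
{
	unsigned long m;

	for (m = 0; m < 4; m++) {
		int bit_shift = ((m & 3) ^ 3) << 3;
		unsigned int mask = 0xffu << bit_shift;

		printf("offset %lu: bit_shift %2d, mask 0x%08x\n",
		       m, bit_shift, mask);
	}
	return 0;
}

This prints bit_shift 24/16/8/0 and mask 0xff000000/0x00ff0000/0x0000ff00/0x000000ff for offsets 0 through 3: the lowest address maps to the most significant byte, exactly what a big-endian load of the containing word expects.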
diff --git a/arch/sparc/include/asm/cmpxchg_64.h b/arch/sparc/include/asm/cmpxchg_64.h
index faa2f61..000f7d7 100644
--- a/arch/sparc/include/asm/cmpxchg_64.h
+++ b/arch/sparc/include/asm/cmpxchg_64.h
@@ -87,6 +87,33 @@ __cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
return new;
}
+/*
+ * Use the 4-byte cas instruction to achieve a 1-byte cmpxchg.  The main
+ * logic here is to compute the bit shift of the byte we are interested
+ * in.  The XOR with 3 reverses the byte offset within the word, as
+ * required by big-endian byte order.
+ */
+static inline unsigned long
+__cmpxchg_u8(volatile unsigned char *m, unsigned char old, unsigned char new)
+{
+ unsigned long maddr = (unsigned long)m;
+ int bit_shift = (((unsigned long)m & 3) ^ 3) << 3;
+ unsigned int mask = 0xff << bit_shift;
+ unsigned int *ptr = (unsigned int *) (maddr & ~3);
+ unsigned int old32, new32, load;
+ unsigned int load32 = *ptr;
+
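+ /* The 4-byte cas can fail because a neighboring byte in the word
+ * changed even though our byte still holds the old value; loop and
+ * retry in that case. */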
+ do {
+ new32 = (load32 & ~mask) | (new << bit_shift);
+ old32 = (load32 & ~mask) | (old << bit_shift);
+ load32 = __cmpxchg_u32(ptr, old32, new32);
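+ /* The whole word matched old32, so our byte was swapped in. */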
+ if (load32 == old32)
+ return old;
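+ /* cas failed: extract our byte from the word it actually saw. */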
+ load = (load32 & mask) >> bit_shift;
+ } while (load == old);
+
+ return load;
+}
+
/* This function doesn't exist, so you'll get a linker error
if something tries to do an invalid cmpxchg(). */
void __cmpxchg_called_with_bad_pointer(void);
@@ -95,6 +122,8 @@ static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
switch (size) {
+ case 1:
+ return __cmpxchg_u8(ptr, old, new);
case 4:
return __cmpxchg_u32(ptr, old, new);
case 8:
return __cmpxchg_u64(ptr, old, new);
}
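The subtle part of __cmpxchg_u8 is the retry loop: the 4-byte cmpxchg can fail for two different reasons. If the target byte itself differed, that is a genuine failure and the observed byte is returned; if only a neighboring byte in the same word changed, the failure is spurious, so old32 and new32 are rebuilt from the freshly observed word and the cas is retried. The sketch below is a rough userspace emulation of the same technique, with GCC's __atomic_compare_exchange_n builtin standing in for the SPARC casa instruction; the name cmpxchg_u8_emul and the use of the GCC builtin are illustrative assumptions, not the kernel implementation.

#include <stdint.h>

/*
 * Emulate a 1-byte compare-and-swap on top of a 4-byte CAS, mirroring
 * the logic of __cmpxchg_u8 above.  Returns the byte observed at *m,
 * which equals 'old' exactly when the swap succeeded.
 */
static unsigned char cmpxchg_u8_emul(volatile unsigned char *m,
				     unsigned char old, unsigned char new)
{
	uintptr_t maddr = (uintptr_t)m;
	/* XOR with 3 reverses the byte offset for big-endian layout;
	 * a little-endian host would drop the "^ 3". */
	int bit_shift = (int)((maddr & 3) ^ 3) << 3;
	uint32_t mask = (uint32_t)0xff << bit_shift;
	volatile uint32_t *ptr = (volatile uint32_t *)(maddr & ~(uintptr_t)3);
	uint32_t load32 = *ptr;

	for (;;) {
		uint32_t old32 = (load32 & ~mask) | ((uint32_t)old << bit_shift);
		uint32_t new32 = (load32 & ~mask) | ((uint32_t)new << bit_shift);
		uint32_t expected = old32;

		/* 4-byte CAS standing in for the SPARC casa instruction. */
		if (__atomic_compare_exchange_n(ptr, &expected, new32, 0,
						__ATOMIC_SEQ_CST,
						__ATOMIC_SEQ_CST))
			return old;	/* our byte matched and was swapped */

		load32 = expected;	/* word value the CAS actually saw */
		if (((load32 & mask) >> bit_shift) != old)
			return (load32 & mask) >> bit_shift; /* real mismatch */
		/* Only a neighboring byte changed: rebuild and retry. */
	}
}

In the kernel, this is what lets the queued rwlock code cmpxchg a single byte of the lock word without disturbing its neighbors.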