__xchg will be used for a non-atomic xchg macro.
Signed-off-by: Andrzej Hajda <andrzej.hajda@intel.com>
---
arch/hexagon/include/asm/cmpxchg.h | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
@@ -9,7 +9,7 @@
#define _ASM_CMPXCHG_H
/*
- * __xchg - atomically exchange a register and a memory location
+ * __arch_xchg - atomically exchange a register and a memory location
* @x: value to swap
* @ptr: pointer to memory
* @size: size of the value
@@ -19,7 +19,7 @@
* Note: there was an errata for V2 about .new's and memw_locked.
*
*/
-static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
+static inline unsigned long __arch_xchg(unsigned long x, volatile void *ptr,
int size)
{
unsigned long retval;
@@ -42,7 +42,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
* Atomically swap the contents of a register with memory. Should be atomic
* between multiple CPU's and within interrupts on the same CPU.
*/
-#define arch_xchg(ptr, v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), \
+#define arch_xchg(ptr, v) ((__typeof__(*(ptr)))__arch_xchg((unsigned long)(v), (ptr), \
sizeof(*(ptr))))
/*
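
For reference, a minimal sketch (user-space C, not kernel code) of the exchange semantics that arch_xchg() wraps: atomically store a new value into a memory location and return the previous value in one step. The trylock pattern and the __atomic_exchange_n() builtin below are illustrative assumptions standing in for the hexagon memw_locked loop, not code from this patch.

/*
 * Illustration only: user-space sketch of xchg-style semantics using the
 * GCC/Clang __atomic_exchange_n() builtin. The lock_word/trylock names
 * are hypothetical and exist only for this example.
 */
#include <stdio.h>

static unsigned long lock_word;	/* 0 = unlocked, 1 = locked */

static int trylock(void)
{
	/* Atomically write 1 and get back the old value in one operation. */
	return __atomic_exchange_n(&lock_word, 1UL, __ATOMIC_ACQUIRE) == 0;
}

static void unlock(void)
{
	__atomic_store_n(&lock_word, 0UL, __ATOMIC_RELEASE);
}

int main(void)
{
	if (trylock()) {
		printf("acquired\n");
		unlock();
	}
	return 0;
}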