patch-2.1.48 linux/include/asm-ppc/atomic.h
Next file: linux/include/asm-ppc/bitops.h
Previous file: linux/include/asm-mips/unistd.h
Back to the patch index
Back to the overall index
- Lines: 130
- Date: Thu Jul 31 13:09:18 1997
- Orig file: v2.1.47/linux/include/asm-ppc/atomic.h
- Orig date: Wed Apr 23 19:01:28 1997
diff -u --recursive --new-file v2.1.47/linux/include/asm-ppc/atomic.h linux/include/asm-ppc/atomic.h
@@ -5,23 +5,116 @@
#ifndef _ASM_PPC_ATOMIC_H_
#define _ASM_PPC_ATOMIC_H_
+#ifdef __SMP__
+typedef struct { volatile int counter; } atomic_t;
+#else
typedef struct { int counter; } atomic_t;
-#define ATOMIC_INIT(i) { (i) }
+#endif
-/*
- * Make sure gcc doesn't try to be clever and move things around
- * on us. We need to use _exactly_ the address the user gave us,
- * not some alias that contains the same information.
- */
-#define __atomic_fool_gcc(x) (*(struct { int a[100]; } *)x)
+#define ATOMIC_INIT(i) { (i) }
#define atomic_read(v) ((v)->counter)
-#define atomic_set(v) (((v)->counter) = i)
+#define atomic_set(v,i) (((v)->counter) = (i))
-#define atomic_dec_return(v) ({atomic_sub(1,(v));(v);})
-#define atomic_inc_return(v) ({atomic_add(1,(v));(v);})
+extern void atomic_add(int a, atomic_t *v);
+extern void atomic_sub(int a, atomic_t *v);
+extern void atomic_inc(atomic_t *v);
+extern int atomic_inc_return(atomic_t *v);
+extern void atomic_dec(atomic_t *v);
+extern int atomic_dec_return(atomic_t *v);
+extern int atomic_dec_and_test(atomic_t *v);
-#define atomic_inc(v) atomic_add(1,(v))
-#define atomic_dec(v) atomic_sub(1,(v))
-#endif
+extern void atomic_clear_mask(unsigned long mask, unsigned long *addr);
+extern void atomic_set_mask(unsigned long mask, unsigned long *addr);
+
+#if 0 /* for now */
+extern __inline__ void atomic_add(atomic_t a, atomic_t *v)
+{
+ atomic_t t;
+
+ __asm__ __volatile__("\n\
+1: lwarx %0,0,%3\n\
+ add %0,%2,%0\n\
+ stwcx. %0,0,%3\n\
+ bne 1b"
+ : "=&r" (t), "=m" (*v)
+ : "r" (a), "r" (v)
+ : "cc");
+}
+
+extern __inline__ void atomic_sub(atomic_t a, atomic_t *v)
+{
+ atomic_t t;
+
+ __asm__ __volatile__("\n\
+1: lwarx %0,0,%3\n\
+ subf %0,%2,%0\n\
+ stwcx. %0,0,%3\n\
+ bne 1b"
+ : "=&r" (t), "=m" (*v)
+ : "r" (a), "r" (v)
+ : "cc");
+}
+
+extern __inline__ int atomic_sub_and_test(atomic_t a, atomic_t *v)
+{
+ atomic_t t;
+
+ __asm__ __volatile__("\n\
+1: lwarx %0,0,%3\n\
+ subf %0,%2,%0\n\
+ stwcx. %0,0,%3\n\
+ bne 1b"
+ : "=&r" (t), "=m" (*v)
+ : "r" (a), "r" (v)
+ : "cc");
+
+ return t == 0;
+}
+
+extern __inline__ void atomic_inc(atomic_t *v)
+{
+ atomic_t t;
+
+ __asm__ __volatile__("\n\
+1: lwarx %0,0,%2\n\
+ addic %0,%0,1\n\
+ stwcx. %0,0,%2\n\
+ bne 1b"
+ : "=&r" (t), "=m" (*v)
+ : "r" (v)
+ : "cc");
+}
+
+extern __inline__ void atomic_dec(atomic_t *v)
+{
+ atomic_t t;
+
+ __asm__ __volatile__("\n\
+1: lwarx %0,0,%2\n\
+ addic %0,%0,-1\n\
+ stwcx. %0,0,%2\n\
+ bne 1b"
+ : "=&r" (t), "=m" (*v)
+ : "r" (v)
+ : "cc");
+}
+
+extern __inline__ int atomic_dec_and_test(atomic_t *v)
+{
+ atomic_t t;
+
+ __asm__ __volatile__("\n\
+1: lwarx %0,0,%2\n\
+ addic %0,%0,-1\n\
+ stwcx. %0,0,%2\n\
+ bne 1b"
+ : "=&r" (t), "=m" (*v)
+ : "r" (v)
+ : "cc");
+
+ return t == 0;
+}
+#endif /* 0 */
+#endif /* _ASM_PPC_ATOMIC_H_ */
FUNET's LINUX-ADM group, linux-adm@nic.funet.fi
TCL-scripts by Sam Shen, slshen@lbl.gov