patch-2.1.53 linux/include/asm-ppc/spinlock.h
Next file: linux/include/asm-ppc/system.h
Previous file: linux/include/asm-ppc/softirq.h
Back to the patch index
Back to the overall index
- Lines: 162
- Date: Thu Sep 4 12:54:49 1997
- Orig file: v2.1.52/linux/include/asm-ppc/spinlock.h
- Orig date: Mon Aug 4 16:25:39 1997
diff -u --recursive --new-file v2.1.52/linux/include/asm-ppc/spinlock.h linux/include/asm-ppc/spinlock.h
@@ -3,7 +3,7 @@
#ifndef __SMP__
-typedef struct { int blah; } spinlock_t;
+typedef struct { } spinlock_t;
#define SPIN_LOCK_UNLOCKED { }
#define spin_lock_init(lock) do { } while(0)
@@ -28,7 +28,7 @@
* irq-safe write-lock, but readers can get non-irqsafe
* read-locks.
*/
-typedef struct { int fred; } rwlock_t;
+typedef struct { } rwlock_t;
#define RW_LOCK_UNLOCKED { }
#define read_lock(lock) do { } while(0)
@@ -49,7 +49,7 @@
#define write_unlock_irqrestore(lock, flags) \
restore_flags(flags)
-#else
+#else /* __SMP__ */
/* Simple spin lock operations. There are two variants, one clears IRQ's
* on the local processor, one does not.
@@ -57,22 +57,48 @@
* We make no fairness assumptions. They have a cost.
*/
-typedef struct {
- volatile unsigned int lock;
- unsigned long previous;
-} spinlock_t;
+struct _spinlock_debug {
+ volatile unsigned long lock;
+ volatile unsigned long owner_pc;
+};
+
+typedef struct _spinlock_debug spinlock_t;
#define SPIN_LOCK_UNLOCKED { 0, 0 }
-#define spin_unlock(lock) ((lock)->lock = 0)
+#define SPIN_LOCK_UNLOCKED { 0, 0 }
+#define spin_lock_init(lp) do { (lp)->owner_pc = 0; (lp)->lock = 0; } while(0)
+#define spin_unlock_wait(lp) do { barrier(); } while((lp)->lock)
+
+extern void _spin_lock(spinlock_t *lock);
+extern int _spin_trylock(spinlock_t *lock);
+extern void _spin_unlock(spinlock_t *lock);
+extern void _spin_lock_irq(spinlock_t *lock);
+extern void _spin_unlock_irq(spinlock_t *lock);
+extern void _spin_lock_irqsave(spinlock_t *lock);
+extern void _spin_unlock_irqrestore(spinlock_t *lock);
+
+#define spin_lock(lp) _spin_lock(lp)
+#define spin_trylock(lp) _spin_trylock(lp)
+#define spin_unlock(lp) _spin_unlock(lp)
+#define spin_lock_irq(lp) _spin_lock_irq(lp)
+#define spin_unlock_irq(lp) _spin_unlock_irq(lp)
+#define spin_lock_irqsave(lp, flags) do { __save_and_cli(flags); \
+ _spin_lock_irqsave(lp); } while (0)
+#define spin_unlock_irqrestore(lp, flags) do { _spin_unlock_irqrestore(lp); \
+ __restore_flags(flags); } while(0)
+#if 0
+extern __inline__ void spin_unlock(spinlock_t *lock)
+{
+ __asm__ __volatile__("stw 0,%0" : : "m" (lock) : "memory");
+}
static inline void spin_lock(spinlock_t * lock)
{
int stuck = 10000000;
int tmp, val;
- __label__ l1;
-l1: __asm__ __volatile__(
+ __asm__ __volatile__(
" mtctr %2\n"
"1: lwarx %0,0,%3\n"
" andi. %1,%0,1\n\t"
@@ -83,12 +109,13 @@
: "=r" (tmp), "=r" (val)
: "r" (stuck), "r" (lock)
: "ctr");
- if (!val) {
- printk("spinlock stuck at %p (%lx)\n", &&l1, lock->previous);
- } else
- lock->previous = (unsigned long) &&l1;
+ if (!val)
+ {
+ unsigned long __nip;
+ asm("mfnip %0\n": "=r" (__nip));
+ printk("spinlock stuck at %08lx\n", __nip);
+ }
}
-
#define spin_trylock(lock) (!set_bit(0,(lock)))
#define spin_lock_irq(lock) \
@@ -102,6 +129,59 @@
#define spin_unlock_irqrestore(lock, flags) \
do { spin_unlock(lock); __restore_flags(flags); } while (0)
+#endif
+
+struct _rwlock_debug {
+ volatile unsigned int lock;
+ unsigned long owner_pc;
+};
+typedef struct _rwlock_debug rwlock_t;
+
+#define RW_LOCK_UNLOCKED { 0, 0 }
+
+extern void _read_lock(rwlock_t *rw);
+extern void _read_unlock(rwlock_t *rw);
+extern void _write_lock(rwlock_t *rw);
+extern void _write_unlock(rwlock_t *rw);
+extern void _read_lock_irq(rwlock_t *rw);
+extern void _read_unlock_irq(rwlock_t *rw);
+extern void _write_lock_irq(rwlock_t *rw);
+extern void _write_unlock_irq(rwlock_t *rw);
+extern void _read_lock_irqsave(rwlock_t *rw);
+extern void _read_unlock_irqrestore(rwlock_t *rw);
+extern void _write_lock_irqsave(rwlock_t *rw);
+extern void _write_unlock_irqrestore(rwlock_t *rw);
+
+#define read_lock(rw) _read_lock(rw)
+#define read_unlock(rw) _read_unlock(rw)
+#define write_lock(rw) _write_lock(rw)
+#define write_unlock(rw) _write_unlock(rw)
+#define read_lock_irq(rw) _read_lock_irq(rw)
+#define read_unlock_irq(rw) _read_unlock_irq(rw)
+#define write_lock_irq(rw) _write_lock_irq(rw)
+#define write_unlock_irq(rw) _write_unlock_irq(rw)
+
+#define read_lock_irqsave(rw, flags) \
+do { __save_and_cli(flags); _read_lock_irqsave(rw); } while (0)
+
+#define read_unlock_irqrestore(rw, flags) do { _read_unlock_irqrestore(rw); \
+ __restore_flags(flags); } while(0)
+
+#define write_lock_irqsave(rw, flags) \
+do { __save_and_cli(flags); _write_lock_irqsave(rw); } while(0)
+
+#define write_unlock_irqrestore(rw, flags) do { _write_unlock_irqrestore(rw); \
+ __restore_flags(flags); } while(0)
+
#endif /* SMP */
#endif /* __ASM_SPINLOCK_H */
+
+
+
+
+
+
+
+
+
FUNET's LINUX-ADM group, linux-adm@nic.funet.fi
TCL-scripts by Sam Shen, slshen@lbl.gov