patch-2.2.0-pre1 linux/include/asm-i386/system.h
- Lines: 65
- Date: Mon Dec 28 14:09:28 1998
- Orig file: v2.1.132/linux/include/asm-i386/system.h
- Orig date: Sun Nov 8 14:03:08 1998
diff -u --recursive --new-file v2.1.132/linux/include/asm-i386/system.h linux/include/asm-i386/system.h
@@ -35,30 +35,30 @@
"a" (prev), "d" (next)); \
} while (0)
-#define _set_base(addr,base) \
-__asm__("movw %%dx,%0\n\t" \
+#define _set_base(addr,base) do { unsigned long __pr; \
+__asm__ __volatile__ ("movw %%dx,%1\n\t" \
"rorl $16,%%edx\n\t" \
- "movb %%dl,%1\n\t" \
- "movb %%dh,%2" \
- : /* no output */ \
+ "movb %%dl,%2\n\t" \
+ "movb %%dh,%3" \
+ :"=&d" (__pr) \
:"m" (*((addr)+2)), \
"m" (*((addr)+4)), \
"m" (*((addr)+7)), \
- "d" (base) \
- :"dx")
+ "0" (base) \
+ ); } while(0)
-#define _set_limit(addr,limit) \
-__asm__("movw %%dx,%0\n\t" \
+#define _set_limit(addr,limit) do { unsigned long __lr; \
+__asm__ __volatile__ ("movw %%dx,%1\n\t" \
"rorl $16,%%edx\n\t" \
- "movb %1,%%dh\n\t" \
+ "movb %2,%%dh\n\t" \
"andb $0xf0,%%dh\n\t" \
"orb %%dh,%%dl\n\t" \
- "movb %%dl,%1" \
- : /* no output */ \
+ "movb %%dl,%2" \
+ :"=&d" (__lr) \
:"m" (*(addr)), \
"m" (*((addr)+6)), \
- "d" (limit) \
- :"dx")
+ "0" (limit) \
+ ); } while(0)
#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1)>>12 )
@@ -165,8 +165,19 @@
* Force strict CPU ordering.
* And yes, this is required on UP too when we're talking
* to devices.
+ *
+ * For now, "wmb()" doesn't actually do anything, as all
+ * Intel CPU's follow what Intel calls a *Processor Order*,
+ * in which all writes are seen in the program order even
+ * outside the CPU.
+ *
+ * I expect future Intel CPU's to have a weaker ordering,
+ * but I'd also expect them to finally get their act together
+ * and add some real memory barriers if so.
*/
#define mb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
+#define rmb() mb()
+#define wmb() __asm__ __volatile__ ("": : :"memory")
/* interrupt control.. */
#define __sti() __asm__ __volatile__ ("sti": : :"memory")
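
The _set_base()/_set_limit() hunks above change the inline asm so that %edx is no longer listed both as an input ("d" (base)) and in the clobber list ("dx"), a combination newer gcc/egcs rejects. Instead, a dummy early-clobber output ("=&d" (__pr)) is declared and the real input is tied to the same register with the "0" matching constraint. Below is a minimal sketch of that constraint pattern, not taken from the patch; the function name swap_halves and its body are invented purely for illustration.

static inline unsigned int swap_halves(unsigned int val)
{
	unsigned int __tmp;

	/* The asm modifies the register holding "val", so rather than
	 * clobbering an input register (no longer allowed), declare an
	 * early-clobber output in %edx and tie the input to it with "0".
	 */
	__asm__ __volatile__("rorl $16,%0"
		: "=&d" (__tmp)		/* %edx is written			*/
		: "0" (val));		/* input starts in the same register	*/
	return __tmp;
}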
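
The comment added in the second hunk explains why wmb() can be a plain compiler barrier on i386: Intel's "Processor Order" already makes stores visible in program order, so only the compiler has to be kept from reordering them. A hypothetical illustration of how the new barriers would be used follows; the producer/consumer names and the data/ready variables are made up for this sketch, only the three macro definitions are reproduced from the patch.

/* i386 definitions from the patch, repeated to keep the sketch self-contained */
#define mb()  __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
#define rmb() mb()
#define wmb() __asm__ __volatile__ ("": : :"memory")

static volatile int data;
static volatile int ready;

static void producer(int value)
{
	data = value;
	wmb();			/* data must be visible before the flag */
	ready = 1;
}

static int consumer(void)
{
	while (!ready)
		;		/* spin until the producer sets the flag */
	rmb();			/* don't let the data read pass the flag read */
	return data;
}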