patch-1.3.48 linux/include/asm-mips/segment.h
- Lines: 239
- Date: Wed Dec 13 12:39:46 1995
- Orig file: v1.3.47/linux/include/asm-mips/segment.h
- Orig date: Wed Jan 25 08:54:23 1995
diff -u --recursive --new-file v1.3.47/linux/include/asm-mips/segment.h linux/include/asm-mips/segment.h
@@ -7,81 +7,119 @@
*
* Copyright (C) 1994, 1995 by Ralf Baechle
*
+ * Note that the quad functions are only used by the 64-bit kernel and
+ * therefore it isn't really important that they get miscompiled for
+ * 32-bit kernels.
*/
#ifndef __ASM_MIPS_SEGMENT_H
#define __ASM_MIPS_SEGMENT_H
+#ifndef __LANGUAGE_ASSEMBLY__
/*
- * Memory segments (32bit kernel mode addresses)
+ * For memcpy()
*/
-#define KUSEG 0x00000000
-#define KSEG0 0x80000000
-#define KSEG1 0xa0000000
-#define KSEG2 0xc0000000
-#define KSEG3 0xe0000000
+#include <linux/string.h>
/*
- * returns the kernel segment base of a given address
+ * This is a gcc optimization barrier, which essentially
+ * inserts a sequence point in the gcc RTL tree across
+ * which gcc can't move code. This is needed when we enter
+ * or exit a critical region (in this case around user-level
+ * accesses that may sleep, and we can't let gcc optimize
+ * global state around them).
*/
-#define KSEGX(a) (a & 0xe0000000)
-
-#ifndef __ASSEMBLY__
+#define __gcc_barrier() __asm__ __volatile__("": : :"memory")
/*
- * Beware: the xxx_fs_word functions work on 16bit words!
+ * Uh, these should become the main single-value transfer routines..
+ * They automatically use the right size if we just have the right
+ * pointer type..
*/
-#define get_fs_byte(addr) get_user_byte((char *)(addr))
-static inline unsigned char get_user_byte(const char *addr)
-{
- return *addr;
-}
-
-#define get_fs_word(addr) get_user_word((short *)(addr))
-static inline unsigned short get_user_word(const short *addr)
-{
- return *addr;
-}
-
-#define get_fs_long(addr) get_user_long((int *)(addr))
-static inline unsigned long get_user_long(const int *addr)
-{
- return *addr;
-}
+#define put_user(x,ptr) __put_user((unsigned long)(x),(ptr),sizeof(*(ptr)))
+#define get_user(ptr) ((__typeof__(*(ptr)))__get_user((ptr),sizeof(*(ptr))))
-#define get_fs_dlong(addr) get_user_dlong((long long *)(addr))
-static inline unsigned long get_user_dlong(const long long *addr)
-{
- return *addr;
-}
-
-#define put_fs_byte(x,addr) put_user_byte((x),(char *)(addr))
-static inline void put_user_byte(char val,char *addr)
-{
- *addr = val;
-}
-
-#define put_fs_word(x,addr) put_user_word((x),(short *)(addr))
-static inline void put_user_word(short val,short * addr)
-{
- *addr = val;
-}
-
-#define put_fs_long(x,addr) put_user_long((x),(int *)(addr))
-static inline void put_user_long(unsigned long val,int * addr)
-{
- *addr = val;
-}
+/*
+ * This is a silly but good way to make sure that
+ * the __put_user function is indeed always optimized,
+ * and that we use the correct sizes..
+ */
+extern int bad_user_access_length(void);
-#define put_fs_dlong(x,addr) put_user_dlong((x),(int *)(addr))
-static inline void put_user_dlong(unsigned long val,long long * addr)
+/* I should make this use unaligned transfers etc.. */
+static inline void __put_user(unsigned long x, void * y, int size)
{
- *addr = val;
+ __gcc_barrier();
+ switch (size) {
+ case 1:
+ *(char *) y = x;
+ break;
+ case 2:
+ *(short *) y = x;
+ break;
+ case 4:
+ *(int *) y = x;
+ break;
+ case 8:
+ *(long *) y = x;
+ break;
+ default:
+ bad_user_access_length();
+ }
+ __gcc_barrier();
+}
+
+/* I should make this use unaligned transfers etc.. */
+static inline unsigned long __get_user(const void * y, int size)
+{
+ unsigned long result;
+
+ __gcc_barrier();
+ switch (size) {
+ case 1:
+ result = *(unsigned char *) y;
+ break;
+ case 2:
+ result = *(unsigned short *) y;
+ break;
+ case 4:
+ result = *(unsigned int *) y;
+ break;
+ case 8:
+ result = *(unsigned long *) y;
+ break;
+ default:
+ result = bad_user_access_length();
+ break;
+ }
+ __gcc_barrier();
+
+ return result;
+}
+
+#define get_fs_byte(addr) get_user((unsigned char *)(addr))
+#define get_fs_word(addr) get_user((unsigned short *)(addr))
+#define get_fs_long(addr) get_user((unsigned int *)(addr))
+#define get_fs_quad(addr) get_user((unsigned long *)(addr))
+
+#define put_fs_byte(x,addr) put_user((x),(char *)(addr))
+#define put_fs_word(x,addr) put_user((x),(short *)(addr))
+#define put_fs_long(x,addr) put_user((x),(int *)(addr))
+#define put_fs_quad(x,addr) put_user((x),(long *)(addr))
+
+static inline void memcpy_fromfs(void * to, const void * from, unsigned long n)
+{
+ __gcc_barrier();
+ memcpy(to, from, n);
+ __gcc_barrier();
+}
+
+static inline void memcpy_tofs(void * to, const void * from, unsigned long n)
+{
+ __gcc_barrier();
+ memcpy(to, from, n);
+ __gcc_barrier();
}
-#define memcpy_fromfs(to, from, n) memcpy((to),(from),(n))
-
-#define memcpy_tofs(to, from, n) memcpy((to),(from),(n))
-
/*
* For segmented architectures, these are used to specify which segment
* to use for the above functions.
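
The put_user()/get_user() macros added above pick the transfer width from sizeof(*(ptr)) and bracket the access with the __gcc_barrier() memory clobber so gcc can't cache values across it. As a rough illustration of that switch-on-sizeof idea only (ordinary user-space C, not the kernel code itself; sketch_put() and barrier() are invented for the example):

/*
 * Stand-alone sketch of the size-dispatch idea behind __put_user().
 */
#include <stdio.h>

/* Same optimization-barrier idiom as __gcc_barrier() in the patch. */
#define barrier() __asm__ __volatile__("" : : : "memory")

/* Pick the store width from the pointer type, as put_user() does. */
#define sketch_put(x, ptr)                                         \
        do {                                                       \
                barrier();                                         \
                switch (sizeof(*(ptr))) {                          \
                case 1: *(unsigned char *)(ptr) = (x); break;      \
                case 2: *(unsigned short *)(ptr) = (x); break;     \
                case 4: *(unsigned int *)(ptr) = (x); break;       \
                case 8: *(unsigned long long *)(ptr) = (x); break; \
                }                                                  \
                barrier();                                         \
        } while (0)

int main(void)
{
        unsigned short hw = 0;
        unsigned int w = 0;

        sketch_put(0x1234, &hw);        /* takes the "case 2" branch */
        sketch_put(0xdeadbeefU, &w);    /* takes the "case 4" branch */
        printf("%#x %#x\n", hw, w);
        return 0;
}

Since the size is a compile-time constant, an optimizing compiler reduces each expansion to a single store; that is the property the bad_user_access_length() comment above alludes to.
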
@@ -94,18 +132,57 @@
static inline unsigned long get_fs(void)
{
- return 0;
+ return USER_DS;
}
static inline unsigned long get_ds(void)
{
- return 0;
+ return KERNEL_DS;
}
static inline void set_fs(unsigned long val)
{
}
-#endif /* !__ASSEMBLY__ */
+#endif /* !__LANGUAGE_ASSEMBLY__ */
+
+/*
+ * Memory segments (32bit kernel mode addresses)
+ */
+#define KUSEG 0x00000000
+#define KSEG0 0x80000000
+#define KSEG1 0xa0000000
+#define KSEG2 0xc0000000
+#define KSEG3 0xe0000000
+
+/*
+ * Returns the kernel segment base of a given address
+ */
+#define KSEGX(a) (((unsigned long)(a)) & 0xe0000000)
+
+/*
+ * Returns the physical address of a KSEG0/KSEG1 address
+ */
+#define PHYSADDR(a) (((unsigned long)(a)) & 0x1fffffff)
+
+/*
+ * Map an address to a certain kernel segment
+ */
+#define KSEG0ADDR(a) ((((unsigned long)(a)) & 0x1fffffff) | KSEG0)
+#define KSEG1ADDR(a) ((((unsigned long)(a)) & 0x1fffffff) | KSEG1)
+#define KSEG2ADDR(a) ((((unsigned long)(a)) & 0x1fffffff) | KSEG2)
+#define KSEG3ADDR(a) ((((unsigned long)(a)) & 0x1fffffff) | KSEG3)
+
+/*
+ * Memory segments (64bit kernel mode addresses)
+ */
+#define XKUSEG 0x0000000000000000
+#define XKSSEG 0x4000000000000000
+#define XKPHYS 0x8000000000000000
+#define XKSEG  0xc000000000000000
+#define CKSEG0 0xffffffff80000000
+#define CKSEG1 0xffffffffa0000000
+#define CKSSEG 0xffffffffc0000000
+#define CKSEG3 0xffffffffe0000000
#endif /* __ASM_MIPS_SEGMENT_H */
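
The 32-bit segment macros added by this patch are plain address arithmetic, so they can be exercised from an ordinary user-space program. The macro bodies below mirror the ones in the patch; the sample address is invented, and the cached/uncached remark reflects the usual MIPS convention that KSEG0 is the cached and KSEG1 the uncached window onto the same physical memory:

/*
 * Stand-alone illustration of the 32-bit KSEG address macros.
 */
#include <stdio.h>

#define KSEG0           0x80000000
#define KSEG1           0xa0000000

#define KSEGX(a)        (((unsigned long)(a)) & 0xe0000000)
#define PHYSADDR(a)     (((unsigned long)(a)) & 0x1fffffff)
#define KSEG0ADDR(a)    ((((unsigned long)(a)) & 0x1fffffff) | KSEG0)
#define KSEG1ADDR(a)    ((((unsigned long)(a)) & 0x1fffffff) | KSEG1)

int main(void)
{
        unsigned long vaddr = 0x80123456;       /* hypothetical KSEG0 address */

        printf("segment:        %#lx\n", KSEGX(vaddr));     /* 0x80000000, i.e. KSEG0 */
        printf("physical:       %#lx\n", PHYSADDR(vaddr));  /* 0x123456 */
        printf("uncached alias: %#lx\n", KSEG1ADDR(vaddr)); /* 0xa0123456 */
        return 0;
}
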