patch-2.1.30 linux/include/asm-sparc64/checksum.h
- Lines: 182
- Date: Thu Mar 20 16:45:06 1997
- Orig file: v2.1.29/linux/include/asm-sparc64/checksum.h
- Orig date: Fri Dec 13 01:37:41 1996
diff -u --recursive --new-file v2.1.29/linux/include/asm-sparc64/checksum.h linux/include/asm-sparc64/checksum.h
@@ -1,4 +1,4 @@
-/* $Id: checksum.h,v 1.3 1996/12/12 15:39:13 davem Exp $ */
+/* $Id: checksum.h,v 1.5 1997/03/18 18:00:28 jj Exp $ */
#ifndef __SPARC64_CHECKSUM_H
#define __SPARC64_CHECKSUM_H
@@ -8,6 +8,7 @@
* Copyright(C) 1995 Miguel de Icaza
* Copyright(C) 1996 David S. Miller
* Copyright(C) 1996 Eddie C. Dost
+ * Copyright(C) 1997 Jakub Jelinek
*
* derived from:
* Alpha checksum c-code
@@ -15,6 +16,8 @@
* RFC1071 Computing the Internet Checksum
*/
+#include <asm/uaccess.h>
+
/* computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
*
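Everything in this file computes the Internet checksum of RFC 1071: 16-bit words are summed with end-around carry, so carries out of the top bit are folded back into the low bits. As a rough illustration only (not the sparc64 code; the name is hypothetical, and a big-endian, 2-byte-aligned buffer is assumed), the accumulation looks like this in portable C:

    /* Sketch of RFC 1071 accumulation; illustrative only.
     * Assumes big-endian (as sparc is), so a trailing odd byte
     * occupies the high half of its 16-bit word. */
    static unsigned int csum_partial_sketch(const unsigned char *buff,
                                            int len, unsigned int sum)
    {
            unsigned long long acc = sum;

            while (len > 1) {               /* whole 16-bit words */
                    acc += *(const unsigned short *)buff;
                    buff += 2;
                    len -= 2;
            }
            if (len)                        /* trailing odd byte */
                    acc += (unsigned int)*buff << 8;

            while (acc >> 32)               /* fold carries back in */
                    acc = (acc & 0xffffffffULL) + (acc >> 32);
            return (unsigned int)acc;
    }

The assembly versions below get the same end-around carry from the addcc/addccc condition-code chain instead of an explicit folding loop.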
@@ -34,11 +37,83 @@
* here even more important to align src and dst on a 32-bit (or even
* better 64-bit) boundary
*/
-extern unsigned int csum_partial_copy(char *src, char *dst, int len, int sum);
-
+/* FIXME: Remove these macros ASAP */
+#define csum_partial_copy(src, dst, len, sum) \
+ csum_partial_copy_nocheck(src,dst,len,sum)
#define csum_partial_copy_fromuser(s, d, l, w) \
- csum_partial_copy((char *) (s), (d), (l), (w))
+ csum_partial_copy((char *) (s), (d), (l), (w))
+
+extern __inline__ unsigned int
+csum_partial_copy_nocheck (const char *src, char *dst, int len,
+ unsigned int sum)
+{
+ register unsigned long ret asm("o0") = (unsigned long)src;
+ register char *d asm("o1") = dst;
+ register unsigned long l asm("g1") = len;
+
+ __asm__ __volatile__ ("
+ call __csum_partial_copy_sparc_generic
+ mov %4, %%g7
+ " : "=r" (ret) : "0" (ret), "r" (d), "r" (l), "r" (sum) :
+ "o1", "o2", "o3", "o4", "o5", "o7", "g1", "g2", "g3", "g5", "g7");
+ return (unsigned int)ret;
+}
+extern __inline__ unsigned int
+csum_partial_copy_from_user(const char *src, char *dst, int len,
+ unsigned int sum, int *err)
+{
+ if (!access_ok (VERIFY_READ, src, len)) {
+ *err = -EFAULT;
+ memset (dst, 0, len);
+ return sum;
+ } else {
+ register unsigned long ret asm("o0") = (unsigned long)src;
+ register char *d asm("o1") = dst;
+ register unsigned long l asm("g1") = len;
+ register unsigned long s asm("g7") = sum;
+
+ __asm__ __volatile__ ("
+ .section __ex_table,#alloc
+ .align 4
+ .word 1f,2
+ .previous
+1:
+ call __csum_partial_copy_sparc_generic
+ stx %5, [%%sp + 0x7ff + 128]
+ " : "=r" (ret) : "0" (ret), "r" (d), "r" (l), "r" (s), "r" (err) :
+ "o1", "o2", "o3", "o4", "o5", "o7", "g1", "g2", "g3", "g5", "g7");
+ return (unsigned int)ret;
+ }
+}
+
+extern __inline__ unsigned int
+csum_partial_copy_to_user(const char *src, char *dst, int len,
+ unsigned int sum, int *err)
+{
+ if (!access_ok (VERIFY_WRITE, dst, len)) {
+ *err = -EFAULT;
+ return sum;
+ } else {
+ register unsigned long ret asm("o0") = (unsigned long)src;
+ register char *d asm("o1") = dst;
+ register unsigned long l asm("g1") = len;
+ register unsigned long s asm("g7") = sum;
+
+ __asm__ __volatile__ ("
+ .section __ex_table,#alloc
+ .align 4
+ .word 1f,1
+ .previous
+1:
+ call __csum_partial_copy_sparc_generic
+ stx %5, [%%sp + 0x7ff + 128]
+ " : "=r" (ret) : "0" (ret), "r" (d), "r" (l), "r" (s), "r" (err) :
+ "o1", "o2", "o3", "o4", "o5", "o7", "g1", "g2", "g3", "g5", "g7");
+ return (unsigned int)ret;
+ }
+}
+
/* ihl is always 5 or greater, almost always is 5, and iph is word aligned
* the majority of the time.
*/
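The two _user variants above guard the user pointer with access_ok and then lean on the kernel's exception-table machinery: the `.word 1f,2` and `.word 1f,1` entries tag the call instruction at label 1, and the delay-slot `stx %5, [%%sp + 0x7ff + 128]` parks the err pointer in the first stack slot past the register-window save area (0x7ff being the sparc64 stack bias), where the fault fixup can presumably find it to report -EFAULT. The second word of each entry looks like a small tag the sparc fault handler treats specially rather than an ordinary fixup address. Stripped of the fused copy-and-checksum, the from-user semantics amount to the following kernel-context sketch (illustrative only; the real code does both passes in one go):

    /* Semantics sketch: copy first, then checksum the kernel copy.
     * copy_from_user returns the number of bytes NOT copied. */
    static unsigned int
    csum_copy_from_user_sketch(const char *src, char *dst, int len,
                               unsigned int sum, int *err)
    {
            if (copy_from_user(dst, src, len)) {
                    *err = -EFAULT;
                    memset(dst, 0, len);    /* don't leak stale bytes */
                    return sum;
            }
            return csum_partial_sketch((const unsigned char *)dst,
                                       len, sum);
    }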
@@ -52,7 +127,7 @@
* both operands.
*/
__asm__ __volatile__("
- sub %2, 4, %%g4
+ sub %2, 4, %%g7
lduw [%1 + 0x00], %0
lduw [%1 + 0x04], %%g2
lduw [%1 + 0x08], %%g3
@@ -66,10 +141,10 @@
addcc %%g3, %0, %0
add %1, 4, %1
addccc %0, %%g0, %0
- subcc %%g4, 1, %%g4
+ subcc %%g7, 1, %%g7
be,a,pt %%icc, 2f
sll %0, 16, %%g2
- ba,pt 1b
+ ba,pt %%xcc, 1b
lduw [%1 + 0x10], %%g3
2:
addcc %0, %%g2, %%g2
@@ -78,7 +153,7 @@
xnor %%g0, %0, %0
" : "=r" (sum), "=&r" (iph)
: "r" (ihl), "1" (iph)
- : "g2", "g3", "g4");
+ : "g2", "g3", "g7");
return sum;
}
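This hunk moves ip_fast_csum's scratch register from %g4 to %g7, updating the clobber list to match, and fixes the loop branch: the sparc64 branch-with-prediction form needs an explicit condition-code register, so the bare `ba,pt 1b` becomes `ba,pt %%xcc, 1b`. The computation itself is the usual IP header checksum over ihl 32-bit words; a portable sketch for reference (hypothetical name, not the kernel's generic version):

    /* Sketch of the IP header checksum; assumes iph is 4-byte
     * aligned and ihl counts 32-bit words (always >= 5). */
    static unsigned short ip_fast_csum_sketch(const unsigned char *iph,
                                              unsigned int ihl)
    {
            const unsigned int *p = (const unsigned int *)iph;
            unsigned long long acc = 0;
            unsigned int i;

            for (i = 0; i < ihl; i++)
                    acc += p[i];
            while (acc >> 32)                     /* end-around carry */
                    acc = (acc & 0xffffffffULL) + (acc >> 32);
            acc = (acc & 0xffff) + (acc >> 16);   /* fold to 16 bits */
            acc = (acc & 0xffff) + (acc >> 16);
            return (unsigned short)~acc;          /* ones' complement */
    }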
@@ -130,29 +205,29 @@
unsigned int sum)
{
__asm__ __volatile__ ("
- addcc %3, %4, %%g4
- addccc %5, %%g4, %%g4
+ addcc %3, %4, %%g7
+ addccc %5, %%g7, %%g7
lduw [%2 + 0x0c], %%g2
lduw [%2 + 0x08], %%g3
- addccc %%g2, %%g4, %%g4
+ addccc %%g2, %%g7, %%g7
lduw [%2 + 0x04], %%g2
- addccc %%g3, %%g4, %%g4
+ addccc %%g3, %%g7, %%g7
lduw [%2 + 0x00], %%g3
- addccc %%g2, %%g4, %%g4
+ addccc %%g2, %%g7, %%g7
lduw [%1 + 0x0c], %%g2
- addccc %%g3, %%g4, %%g4
+ addccc %%g3, %%g7, %%g7
lduw [%1 + 0x08], %%g3
- addccc %%g2, %%g4, %%g4
+ addccc %%g2, %%g7, %%g7
lduw [%1 + 0x04], %%g2
- addccc %%g3, %%g4, %%g4
+ addccc %%g3, %%g7, %%g7
lduw [%1 + 0x00], %%g3
- addccc %%g2, %%g4, %%g4
- addccc %%g3, %%g4, %0
+ addccc %%g2, %%g7, %%g7
+ addccc %%g3, %%g7, %0
addc 0, %0, %0
" : "=&r" (sum)
: "r" (saddr), "r" (daddr), "r"(htonl((__u32) (len))),
"r"(htonl(proto)), "r"(sum)
- : "g2", "g3", "g4");
+ : "g2", "g3", "g7");
return csum_fold(sum);
}
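The same %g4-to-%g7 renaming is applied to this last routine. Judging by the four 32-bit loads from each of the two address pointers, it sums a pseudo-header with 16-byte (IPv6-style) source and destination addresses, plus the already-byteswapped length and protocol, before csum_fold reduces the result to 16 bits. A hedged C sketch of that computation (the struct and function names here are hypothetical):

    struct addr128 { unsigned int w[4]; };      /* stand-in for a
                                                 * 16-byte address */

    static unsigned short
    pseudo_hdr_csum_sketch(const struct addr128 *saddr,
                           const struct addr128 *daddr,
                           unsigned int len, unsigned int proto,
                           unsigned int sum)
    {
            unsigned long long acc = sum;
            int i;

            acc += htonl(len);                  /* network byte order */
            acc += htonl(proto);
            for (i = 0; i < 4; i++) {           /* both 128-bit addrs */
                    acc += saddr->w[i];
                    acc += daddr->w[i];
            }
            while (acc >> 32)                   /* as the addccc chain */
                    acc = (acc & 0xffffffffULL) + (acc >> 32);
            acc = (acc & 0xffff) + (acc >> 16); /* csum_fold */
            acc = (acc & 0xffff) + (acc >> 16);
            return (unsigned short)~acc;
    }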