patch-2.1.48 linux/arch/ppc/kernel/misc.S
- Lines: 941
- Date: Thu Jul 31 13:09:17 1997
- Orig file: v2.1.47/linux/arch/ppc/kernel/misc.S
- Orig date: Tue May 13 22:41:03 1997
diff -u --recursive --new-file v2.1.47/linux/arch/ppc/kernel/misc.S linux/arch/ppc/kernel/misc.S
@@ -1,234 +1,40 @@
/*
- * This module contains the PowerPC interrupt fielders
- * set of code at specific locations, based on function
+ * This file contains miscellaneous low-level functions.
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
*/
-#include "ppc_asm.tmpl"
+#include <linux/config.h>
#include <linux/sys.h>
+#include <asm/unistd.h>
#include <asm/errno.h>
-#include "ppc_defs.h"
#include <asm/processor.h>
-
-/* Keep track of low-level exceptions - rather crude, but informative */
-#define STATS
-
-/*
- * Increment a [64 bit] statistic counter
- * Uses R2, R3
- */
-#define BUMP(ctr) \
- lis r2,ctr@h; \
- ori r2,r2,ctr@l; \
- lwz r3,4(r2); \
- addic r3,r3,1; \
- stw r3,4(r2); \
- lwz r3,0(r2); \
- addze r3,r3; \
- stw r3,0(r2)
+#include "ppc_asm.tmpl"
+#include "ppc_defs.h"
-/*#ifdef CONFIG_603*/
-/* This instruction is not implemented on the PPC 603 */
+/* This instruction is not implemented on the PPC 601 or 603 */
#define tlbia \
- li r4,64; \
+ li r4,128; \
mtspr CTR,r4; \
li r4,0; \
0: tlbie r4; \
addi r4,r4,0x1000; \
bdnz 0b
-/*#endif*/ /* CONFIG_603*/
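For readers less fluent in PowerPC assembly, the tlbia macro above can be read as the C sketch below. It is illustrative only: tlbia_sketch is not a symbol in the patch, and it assumes 128 TLB entries and 4 KB pages, exactly as the macro now does.

    static inline void tlbia_sketch(void)
    {
            unsigned long ea;

            /* li r4,128; mtspr CTR,r4; li r4,0; then tlbie/addi/bdnz loop */
            for (ea = 0; ea < 128 * 0x1000; ea += 0x1000)
                    __asm__ __volatile__("tlbie %0" : : "r" (ea));
    }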
_TEXT()
-#define CPU_CTL 0x80000092
-_GLOBAL(hard_reset_now)
- mfmsr r3 /* Disable interrupts */
- li r4,0
- ori r4,r4,MSR_EE
- andc r3,r3,r4
- ori r3,r3,MSR_IP /* Set FLASH/ROM interrupt handlers */
- sync
- mtmsr r3
- lis r3,CPU_CTL>>16
- ori r3,r3,(CPU_CTL&0xFFFF)
- lbz r4,0(r3) /* Turn on SRESET */
- li r5,1
- andc r4,r4,r5 /* Make sure we go from 0->1 */
- stb r4,0(r3)
- ori r4,r4,1
- stb r4,0(r3) /* This should do it! */
-99: nop
- b 99b
-
-#if 0
-/*
- unsigned short
- le16_to_cpu(unsigned short val)
-*/
-_GLOBAL(le16_to_cpu)
- lis r4,_le_scratch@h
- ori r4,r4,_le_scratch@l
- sth r3,0(r4)
- li r5,0
- lhbrx r3,r4,r5
- blr
-
-_GLOBAL(le32_to_cpu)
- lis r4,_le_scratch@h
- ori r4,r4,_le_scratch@l
- stw r3,0(r4)
- li r5,0
- lwbrx r3,r4,r5
- blr
-_GLOBAL(_le_scratch)
- .space 4
-#endif
-#if 1
-/*
-extern int __put_user_8(char, char *);
-extern int __put_user_16(short, short *);
-extern int __put_user_32(long, long *);
-*/
-_GLOBAL(__put_user_8)
- /* setup exception stuff */
- lis r2,current_set@ha
- lwz r2,current_set@l(r2)
- /* increment excount */
- lwz r6,TSS+TSS_EXCOUNT(r2)
- addi r6,r6,1
- stw r6,TSS+TSS_EXCOUNT(r2)
- /* set expc */
- lis r6,1f@h
- ori r6,r6,1f@l
- stw r6,TSS+TSS_EXPC(r2)
-
- stb r3,0(r4)
- li r3,0 /* successful return */
- li r6,0
- stw r6,TSS+TSS_EXCOUNT(r2)
- blr
-1: li r3,-EFAULT /* bad access */
- li r6,0
- stw r6,TSS+TSS_EXCOUNT(r2)
- blr
-
-_GLOBAL(__put_user_16)
- /* setup exception stuff */
- lis r2,current_set@ha
- lwz r2,current_set@l(r2)
- /* increment excount */
- lwz r6,TSS+TSS_EXCOUNT(r2)
- addi r6,r6,1
- stw r6,TSS+TSS_EXCOUNT(r2)
- /* set expc */
- lis r6,1f@h
- ori r6,r6,1f@l
- stw r6,TSS+TSS_EXPC(r2)
-
- sth r3,0(r4)
- li r3,0 /* successful return */
- li r6,0
- stw r6,TSS+TSS_EXCOUNT(r2)
- blr
-1: li r3,-EFAULT /* bad access */
- li r6,0
- stw r6,TSS+TSS_EXCOUNT(r2)
- blr
-
-_GLOBAL(__put_user_32)
- /* setup exception stuff */
- lis r2,current_set@ha
- lwz r2,current_set@l(r2)
- /* increment excount */
- lwz r6,TSS+TSS_EXCOUNT(r2)
- addi r6,r6,1
- stw r6,TSS+TSS_EXCOUNT(r2)
- /* set expc */
- lis r6,1f@h
- ori r6,r6,1f@l
- stw r6,TSS+TSS_EXPC(r2)
-
- stw r3,0(r4)
- li r3,0 /* successful return */
- li r6,0
- stw r6,TSS+TSS_EXCOUNT(r2)
- blr
-1: li r3,-EFAULT /* bad access */
- li r6,0
- stw r6,TSS+TSS_EXCOUNT(r2)
- blr
-
-_GLOBAL(__get_user_8)
- /* setup exception stuff */
- lis r2,current_set@ha
- lwz r2,current_set@l(r2)
- /* increment excount */
- lwz r6,TSS+TSS_EXCOUNT(r2)
- addi r6,r6,1
- stw r6,TSS+TSS_EXCOUNT(r2)
- /* set expc */
- lis r6,1f@h
- ori r6,r6,1f@l
- stw r6,TSS+TSS_EXPC(r2)
-
- lbz r3,0(r4)
- li r4,0 /* successful return */
- li r6,0
- stw r6,TSS+TSS_EXCOUNT(r2)
- blr
-1: li r4,-EFAULT /* bad access */
- li r6,0
- stw r6,TSS+TSS_EXCOUNT(r2)
- blr
-
-_GLOBAL(__get_user_16)
- /* setup exception stuff */
- lis r2,current_set@ha
- lwz r2,current_set@l(r2)
- /* increment excount */
- lwz r6,TSS+TSS_EXCOUNT(r2)
- addi r6,r6,1
- stw r6,TSS+TSS_EXCOUNT(r2)
- /* set expc */
- lis r6,1f@h
- ori r6,r6,1f@l
- stw r6,TSS+TSS_EXPC(r2)
-
- lhz r3,0(r4)
- li r4,0 /* successful return */
- li r6,0
- stw r6,TSS+TSS_EXCOUNT(r2)
- blr
-1: li r4,-EFAULT /* bad access */
- li r6,0
- stw r6,TSS+TSS_EXCOUNT(r2)
- blr
-
-_GLOBAL(__get_user_32)
- /* setup exception stuff */
- lis r2,current_set@ha
- lwz r2,current_set@l(r2)
- /* increment excount */
- lwz r6,TSS+TSS_EXCOUNT(r2)
- addi r6,r6,1
- stw r6,TSS+TSS_EXCOUNT(r2)
- /* set expc */
- lis r6,1f@h
- ori r6,r6,1f@l
- stw r6,TSS+TSS_EXPC(r2)
- lwz r3,0(r4)
- li r4,0 /* successful return */
- li r6,0
- stw r6,TSS+TSS_EXCOUNT(r2)
- blr
-1: li r4,-EFAULT /* bad access */
- li r6,0
- stw r6,TSS+TSS_EXCOUNT(r2)
- blr
-#endif
+
/*
* Disable interrupts
* rc = _disable_interrupts()
*/
_GLOBAL(_disable_interrupts)
+_GLOBAL(__cli)
+_GLOBAL(_hard_cli)
mfmsr r0 /* Get current interrupt state */
rlwinm r3,r0,16+1,32-1,31 /* Extract old value of 'EE' */
li r4,0 /* Need [unsigned] value of MSR_EE */
@@ -244,20 +50,18 @@
* turns on interrupts if state = 1.
*/
_GLOBAL(_enable_interrupts)
- mfmsr r0 /* Get current state */
- rlwimi r0,r3,16-1,32-16,32-16 /* Insert bit */
+ cmpi 0,r3,0 /* turning them on? */
+ beqlr /* nothing to do if state == 0 */
+_GLOBAL(__sti)
+_GLOBAL(_hard_sti)
+ lis r4,lost_interrupts@ha
+ lwz r4,lost_interrupts@l(r4)
+ mfmsr r3 /* Get current state */
+ ori r3,r3,MSR_EE /* Turn on 'EE' bit */
+ cmpi 0,r4,0 /* lost interrupts to process first? */
+ bne- do_lost_interrupts
sync /* Some chip revs have problems here... */
- mtmsr r0 /* Update machine state */
- blr
-
-/*
- * Get 'flags' (aka machine status register)
- * __save_flags(long *ptr)
- */
-_GLOBAL(__save_flags)
- mfmsr r0 /* Get current state */
- stw r0,0(r3)
- mr r3,r0
+ mtmsr r3 /* Update machine state */
blr
/*
@@ -265,34 +69,37 @@
* __restore_flags(long val)
*/
_GLOBAL(__restore_flags)
- sync /* Some chip revs have problems here... */
+ andi. r0,r3,MSR_EE /* enabling interrupts? */
+ beq 2f
+ lis r4,lost_interrupts@ha
+ lwz r4,lost_interrupts@l(r4)
+ cmpi 0,r4,0
+ bne do_lost_interrupts
+2: sync /* Some chip revs have problems here... */
mtmsr r3
isync
blr
/*
- * Disable interrupts - like an 80x86
- * cli()
- */
-_GLOBAL(cli)
- mfmsr r0 /* Get current interrupt state */
- rlwinm r3,r0,16+1,32-1,31 /* Extract old value of 'EE' */
- li r4,0 /* Need [unsigned] value of MSR_EE */
- ori r4,r4,MSR_EE /* Set to turn off bit */
- andc r0,r0,r4 /* Clears bit in (r4) */
- sync /* Some chip revs have problems here... */
- mtmsr r0 /* Update machine state */
- blr /* Done */
-
-/*
- * Enable interrupts - like an 80x86
- * sti()
+ * We were about to enable interrupts but we have to simulate
+ * some interrupts that were lost by enable_irq first.
*/
-_GLOBAL(sti)
- mfmsr r0 /* Get current state */
- ori r0,r0,MSR_EE /* Turn on 'EE' bit */
- sync /* Some chip revs have problems here... */
- mtmsr r0 /* Update machine state */
+do_lost_interrupts:
+ stwu r1,-16(r1)
+ mflr r0
+ stw r0,20(r1)
+ stw r3,8(r1)
+1: bl fake_interrupt
+ lis r4,lost_interrupts@ha
+ lwz r4,lost_interrupts@l(r4)
+ cmpi 0,r4,0
+ bne- 1b
+ lwz r3,8(r1)
+ sync
+ mtmsr r3
+ lwz r0,20(r1)
+ mtlr r0
+ addi r1,r1,16
blr
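In C terms, the new __sti path drains any interrupts recorded in lost_interrupts by calling fake_interrupt, and only then sets MSR_EE. A hedged sketch follows: sti_sketch is an illustrative name, the extern declarations are assumptions about the kernel symbols the assembly references, and MSR_EE comes from asm/processor.h.

    #include <asm/processor.h>          /* MSR_EE */

    extern int lost_interrupts;         /* symbols used by misc.S */
    extern void fake_interrupt(void);

    static inline void sti_sketch(void)
    {
            unsigned long msr;

            while (lost_interrupts)     /* do_lost_interrupts loop     */
                    fake_interrupt();   /* replay each missed irq      */

            __asm__ __volatile__("mfmsr %0" : "=r" (msr));
            msr |= MSR_EE;              /* turn external interrupts on */
            __asm__ __volatile__("sync; mtmsr %0" : : "r" (msr));
    }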
/*
@@ -300,7 +107,6 @@
*/
_GLOBAL(_tlbia)
tlbia
- BUMP(__TLBIAs)
blr
/*
@@ -308,17 +114,7 @@
*/
_GLOBAL(_tlbie)
tlbie r3
- BUMP(__TLBIEs)
blr
-
-/*
- * Fetch the current SR register
- * get_SR(int index)
- */
-_GLOBAL(get_SR)
- mfsrin r3,r3
- blr
-
/*
* Atomic [test&set] exchange
*
@@ -340,7 +136,11 @@
* void atomic_sub(int c, int *v)
* void atomic_inc(int *v)
* void atomic_dec(int *v)
- * void atomic_dec_and_test(int *v)
+ * int atomic_dec_and_test(int *v)
+ * int atomic_inc_return(int *v)
+ * int atomic_dec_return(int *v)
+ * void atomic_clear_mask(atomic_t mask, atomic_t *addr)
+ * void atomic_set_mask(atomic_t mask, atomic_t *addr);
*/
_GLOBAL(atomic_add)
10: lwarx r5,0,r4 /* Fetch old value & reserve */
@@ -360,69 +160,46 @@
stwcx. r5,0,r3 /* Update with new value */
bne- 10b /* Retry if "reservation" (i.e. lock) lost */
blr
+_GLOBAL(atomic_inc_return)
+10: lwarx r5,0,r3 /* Fetch old value & reserve */
+ addi r5,r5,1 /* Perform 'add' operation */
+ stwcx. r5,0,r3 /* Update with new value */
+ bne- 10b /* Retry if "reservation" (i.e. lock) lost */
+ mr r3,r5 /* Return new value */
+ blr
_GLOBAL(atomic_dec)
10: lwarx r5,0,r3 /* Fetch old value & reserve */
subi r5,r5,1 /* Perform 'add' operation */
stwcx. r5,0,r3 /* Update with new value */
bne- 10b /* Retry if "reservation" (i.e. lock) lost */
blr
+_GLOBAL(atomic_dec_return)
+10: lwarx r5,0,r3 /* Fetch old value & reserve */
+ subi r5,r5,1 /* Perform 'add' operation */
+ stwcx. r5,0,r3 /* Update with new value */
+ bne- 10b /* Retry if "reservation" (i.e. lock) lost */
+ mr r3,r5 /* Return new value */
+ blr
_GLOBAL(atomic_dec_and_test)
10: lwarx r5,0,r3 /* Fetch old value & reserve */
subi r5,r5,1 /* Perform 'add' operation */
stwcx. r5,0,r3 /* Update with new value */
bne- 10b /* Retry if "reservation" (i.e. lock) lost */
cmpi 0,r5,0 /* Return 'true' IFF 0 */
- bne 15f
li r3,1
+ beqlr
+ li r3,0
blr
-15: li r3,0
- blr
-
-
-/*
- * Delay for a specific # of "loops"
- * __delay(int loops)
- */
-_GLOBAL(__delay)
- mtctr r3
-00: addi r3,r3,0 /* NOP */
- bdnz 00b
- blr
-
-/*
- * Delay for a number of microseconds
- * udelay(int usecs)
- */
-_GLOBAL(udelay)
-00: li r0,86 /* Instructions / microsecond? */
- mtctr r0
-10: addi r0,r0,0 /* NOP */
- bdnz 10b
- subic. r3,r3,1
- bne 00b
- blr
-
-/*
- * Atomically increment [intr_count]
- */
-_GLOBAL(start_bh_atomic)
- lis r3,intr_count@h
- ori r3,r3,intr_count@l
-10: lwarx r4,0,r3
- addi r4,r4,1
- stwcx. r4,0,r3
+_GLOBAL(atomic_clear_mask)
+10: lwarx r5,0,r4
+ andc r5,r5,r3
+ stwcx. r5,0,r4
bne- 10b
blr
-
-/*
- * Atomically decrement [intr_count]
- */
-_GLOBAL(end_bh_atomic)
- lis r3,intr_count@h
- ori r3,r3,intr_count@l
-10: lwarx r4,0,r3
- subic r4,r4,1
- stwcx. r4,0,r3
+_GLOBAL(atomic_set_mask)
+10: lwarx r5,0,r4
+ or r5,r5,r3
+ stwcx. r5,0,r4
bne- 10b
blr
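All of these atomic routines use the same lwarx/stwcx. reserve-and-retry idiom: load with reservation, modify, store conditionally, and retry if the reservation was lost. As a sketch, atomic_add expressed as C inline assembly (the patch keeps the real routines in assembly; atomic_add_sketch is an illustrative name):

    static inline void atomic_add_sketch(int c, volatile int *v)
    {
            int t;

            __asm__ __volatile__(
            "1:     lwarx   %0,0,%3\n"  /* fetch old value, take reservation */
            "       add     %0,%2,%0\n" /* perform the add                   */
            "       stwcx.  %0,0,%3\n"  /* store only if reservation held    */
            "       bne-    1b"         /* reservation lost: retry           */
            : "=&r" (t), "=m" (*v)
            : "r" (c), "r" (v), "m" (*v)
            : "cc");
    }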
@@ -431,6 +208,8 @@
*
* insw(port, buf, len)
* outsw(port, buf, len)
+ * insl(port, buf, len)
+ * outsl(port, buf, len)
*/
_GLOBAL(_insw)
mtctr r5
@@ -448,135 +227,39 @@
bdnz 00b
blr
-#if 0
-/*
- *extern inline int find_first_zero_bit(void * vaddr, unsigned size)
- *{
- * unsigned long res;
- * unsigned long *p;
- * unsigned long *addr = vaddr;
- *
- * if (!size)
- * return 0;
- * __asm__ __volatile__ (" moveq #-1,d0\n\t"
- * "1:"
- * " cmpl %1@+,d0\n\t"
- * " bne 2f\n\t"
- * " subql #1,%0\n\t"
- * " bne 1b\n\t"
- * " bra 5f\n\t"
- * "2:"
- * " movel %1@-,d0\n\t"
- * " notl d0\n\t"
- * " bfffo d0{#0,#0},%0\n\t"
- * "5:"
- * : "=d" (res), "=a" (p)
- * : "0" ((size + 31) >> 5), "1" (addr)
- * : "d0");
- * return ((p - addr) << 5) + res;
- *}
- */
-_GLOBAL(find_first_zero_bit)
- li r5,0 /* bit # */
- subi r3,r3,4 /* Adjust pointer for auto-increment */
-00: lwzu r0,4(r3) /* Get next word */
- not. r7,r0 /* Complement to find ones */
- beq 10f /* Jump if all ones */
-02: andi. r7,r0,1 /* Check low-order bit */
- beq 20f /* All done when zero found */
- srawi r0,r0,1
- addi r5,r5,1
- b 02b
-10: addi r5,r5,32 /* Update bit # */
- subic. r4,r4,32 /* Any more? */
- bgt 00b
-20: mr r3,r5 /* Compute result */
- blr
-
-/*
- *static inline int find_next_zero_bit (void *vaddr, int size,
- * int offset)
- *{
- * unsigned long *addr = vaddr;
- * unsigned long *p = addr + (offset >> 5);
- * int set = 0, bit = offset & 31, res;
- *
- * if (bit) {
- * // Look for zero in first longword
- * __asm__("bfffo %1{#0,#0},%0"
- * : "=d" (set)
- * : "d" (~*p << bit));
- * if (set < (32 - bit))
- * return set + offset;
- * set = 32 - bit;
- * p++;
- * }
- * // No zero yet, search remaining full bytes for a zero
- * res = find_first_zero_bit (p, size - 32 * (p - addr));
- * return (offset + set + res);
- *}
- */
-_GLOBAL(find_next_zero_bit)
- addi r5,r5,1 /* bump offset to start */
- srawi r6,r5,5 /* word offset */
- add r6,r6,r6 /* byte offset */
- add r6,r6,r6 /* byte offset */
- add r3,r3,r6 /* compute byte position */
- sub r4,r4,r5 /* adjust size by starting index */
- andi. r0,r5,0x1F /* offset in current word? */
- beq 10f /* at start of word */
- lwz r0,0(r3) /* get word */
- sraw r0,r0,r5 /* shift right */
- not. r7,r0
- beq 07f /* jump if only ones remain */
-05: andi. r7,r0,1 /* found zero? */
- beq 90f /* yes - all done */
- srawi r0,r0,1
- addi r5,r5,1
- b 05b
-07: andi. r6,r5,0x1F
- subfic r0,r6,32
- add r5,r5,r0
- sub r4,r4,r0
- b 20f
-10: subi r3,r3,4 /* Adjust pointer for auto-increment */
-20: lwzu r0,4(r3) /* Get next word */
- not. r7,r0 /* Complement to find ones */
- beq 40f /* Jump if all ones */
-30: andi. r7,r0,1 /* Check low-order bit */
- beq 90f /* All done when zero found */
- srawi r0,r0,1
- addi r5,r5,1
- b 30b
-40: addi r5,r5,32 /* Update bit # */
- subic. r4,r4,32 /* Any more? */
- bgt 20b
-90: mr r3,r5 /* Compute result */
+_GLOBAL(_insl)
+ mtctr r5
+ subi r4,r4,4
+00: lwbrx r5,0,r3
+ stwu r5,4(r4)
+ bdnz 00b
blr
+
+_GLOBAL(_outsl)
+ mtctr r5
+ subi r4,r4,4
+00: lwzu r5,4(r4)
+ stwbrx r5,0,r3
+ bdnz 00b
+ blr
+
+#ifdef CONFIG_PMAC
+_GLOBAL(ide_insw)
+ mtctr r5
+ subi r4,r4,2
+00: lhzx r5,0,r3
+ sthu r5,2(r4)
+ bdnz 00b
+ blr
+
+_GLOBAL(ide_outsw)
+ mtctr r5
+ subi r4,r4,2
+00: lhzu r5,2(r4)
+ sthx r5,0,r3
+ bdnz 00b
+ blr
#endif
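The new _insl/_outsl use lwbrx/stwbrx, so each 32-bit datum is byte-reversed between the little-endian I/O bus and the big-endian CPU, whereas the PMAC ide_insw/ide_outsw deliberately skip the swap. A C sketch of _insl (insl_sketch is an illustrative name):

    static inline void insl_sketch(unsigned long port, unsigned int *buf, int len)
    {
            while (len-- > 0)
                    /* lwbrx: load the word byte-reversed from the port */
                    __asm__ __volatile__("lwbrx %0,0,%1"
                                         : "=r" (*buf++)
                                         : "r" (port));
    }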
-
-/*
- *
- * ffz = Find First Zero in word. Undefined if no zero exists,
- * so code should check against ~0UL first..
- *
- *extern inline unsigned long ffz(unsigned long word)
- *{
- * __asm__ __volatile__ ("bfffo %1{#0,#0},%0"
- * : "=d" (word)
- * : "d" (~(word)));
- * return word;
- *}
- */
-_GLOBAL(ffz)
- mr r4,r3
- li r3,0
-10: andi. r0,r4,1 /* Find the zero we know is there */
- srawi r4,r4,1
- beq 90f
- addi r3,r3,1
- b 10b
-90: blr
/*
* Extended precision shifts
@@ -605,43 +288,7 @@
slw r3,r3,r5 /* YYY--- */
or r3,r3,r7 /* YYYZZZ */
blr
-
-_GLOBAL(abort)
- .long 0
-
-/* in include/asm/string.h now -- Cort */
-#if 0
-_GLOBAL(bzero)
-#define bufp r3
-#define len r4
-#define pat r5
-/* R3 has buffer */
-/* R4 has length */
-/* R5 has pattern */
- cmpi 0,len,0 /* Exit if len <= 0 */
- ble 99f
- andi. r0,bufp,3 /* Must be on longword boundary */
- bne 10f /* Use byte loop if not aligned */
- andi. r0,len,3 /* Check for overrage */
- subi bufp,bufp,4 /* Adjust pointer */
- srawi len,len,2 /* Divide by 4 */
- blt 99f /* If negative - bug out! */
- mtspr CTR,len /* Set up counter */
- li pat,0
-00: stwu pat,4(bufp) /* Store value */
- bdnz 00b /* Loop [based on counter] */
- mr len,r0 /* Get remainder (bytes) */
-10: cmpi 0,len,0 /* Any bytes left */
- ble 99f /* No - all done */
- mtspr CTR,len /* Set up counter */
- subi bufp,bufp,1 /* Adjust pointer */
- li pat,0
-20: stbu pat,1(bufp) /* Store value */
- bdnz 20b /* Loop [based on counter] */
-99: blr
-#endif
-
_GLOBAL(abs)
cmpi 0,r3,0
bge 10f
@@ -651,39 +298,72 @@
_GLOBAL(_get_SP)
mr r3,r1 /* Close enough */
blr
-
-_GLOBAL(_get_SDR1)
- mfspr r3,SDR1
+
+_GLOBAL(_get_PVR)
+ mfspr r3,PVR
blr
-_GLOBAL(_get_SRx)
- mfsrin r3,r3
+_GLOBAL(cvt_fd)
+cvt_fd:
+ lfs 0,0(r3)
+ stfd 0,0(r4)
+ blr
+/*
+ * Fetch the current SR register
+ * get_SR(int index)
+ */
+_GLOBAL(get_SR)
+ mfsrin r4,r3
+ mr r3,r4
blr
-_GLOBAL(_get_PVR)
- mfspr r3,PVR
+
+_GLOBAL(cvt_df)
+cvt_df:
+ lfd 0,0(r3)
+ stfs 0,0(r4)
blr
/*
* Create a kernel thread
* __kernel_thread(flags, fn, arg)
*/
-#if 1
-#define SYS_CLONE 120
_GLOBAL(__kernel_thread)
-__kernel_thread:
- li r0,SYS_CLONE
+ li r0,__NR_clone
sc
cmpi 0,r3,0 /* parent or child? */
bnelr /* return if parent */
mtlr r4 /* fn addr in lr */
mr r3,r5 /* load arg and call fn */
- blr
- li 0, 1 /* exit after child exits */
- li 3, 0
+ blrl
+ li r0,__NR_exit /* exit after child exits */
+ li r3,0
sc
-#endif
-
+
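The __kernel_thread routine above is essentially "clone, run fn(arg) in the child, exit when fn returns". A user-space analogue using glibc's clone(), illustrative only: glibc wants an explicit child stack, which the bare __NR_clone/__NR_exit traps in the assembly do not require.

    #define _GNU_SOURCE
    #include <sched.h>
    #include <signal.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/wait.h>

    static int fn(void *arg)
    {
            printf("child: %s\n", (char *)arg);
            return 0;                   /* cf. li r0,__NR_exit; sc */
    }

    int main(void)
    {
            char *stack = malloc(64 * 1024);
            pid_t pid = clone(fn, stack + 64 * 1024, SIGCHLD, "hello");

            waitpid(pid, NULL, 0);      /* parent: reap the child */
            free(stack);
            return 0;
    }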
+#define SYSCALL(name) \
+_GLOBAL(name) \
+ li r0,__NR_##name; \
+ sc; \
+ bnslr; \
+ lis r4,errno@ha; \
+ stw r3,errno@l(r4); \
+ li r3,-1; \
+ blr
+
+#define __NR__exit __NR_exit
+
+SYSCALL(idle)
+SYSCALL(setup)
+SYSCALL(sync)
+SYSCALL(setsid)
+SYSCALL(write)
+SYSCALL(dup)
+SYSCALL(execve)
+SYSCALL(open)
+SYSCALL(close)
+SYSCALL(waitpid)
+
+
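Each SYSCALL(name) stub issues the trap and, if the kernel flags an error (the summary-overflow bit tested by bnslr), stores the returned code in errno and returns -1. A runnable user-space analogue of that convention, using the portable syscall(2) wrapper instead of a bare sc:

    #include <errno.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
            long ret = syscall(SYS_close, -1);      /* cf. SYSCALL(close)   */

            if (ret == -1)                          /* kernel flagged error */
                    printf("errno = %d\n", errno);  /* EBADF here           */
            return 0;
    }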
/* Why isn't this a) automatic, b) written in 'C'? */
.data
.align 4
@@ -694,12 +374,12 @@
.long sys_fork
.long sys_read
.long sys_write
- .long sys_open /* 5 */
+ .long sys_open /* 5 */
.long sys_close
.long sys_waitpid
.long sys_creat
.long sys_link
- .long sys_unlink /* 10 */
+ .long sys_unlink /* 10 */
.long sys_execve
.long sys_chdir
.long sys_time
@@ -709,7 +389,7 @@
.long sys_break
.long sys_stat
.long sys_lseek
- .long sys_getpid /* 20 */
+ .long sys_getpid /* 20 */
.long sys_mount
.long sys_umount
.long sys_setuid
@@ -734,12 +414,12 @@
.long sys_pipe
.long sys_times
.long sys_prof
- .long sys_brk /* 45 */
+ .long sys_brk /* 45 */
.long sys_setgid
.long sys_getgid
.long sys_signal
.long sys_geteuid
- .long sys_getegid /* 50 */
+ .long sys_getegid /* 50 */
.long sys_acct
.long sys_phys
.long sys_lock
@@ -754,57 +434,57 @@
.long sys_ustat
.long sys_dup2
.long sys_getppid
- .long sys_getpgrp /* 65 */
+ .long sys_getpgrp /* 65 */
.long sys_setsid
.long sys_sigaction
.long sys_sgetmask
.long sys_ssetmask
- .long sys_setreuid /* 70 */
+ .long sys_setreuid /* 70 */
.long sys_setregid
.long sys_sigsuspend
.long sys_sigpending
.long sys_sethostname
- .long sys_setrlimit /* 75 */
+ .long sys_setrlimit /* 75 */
.long sys_getrlimit
.long sys_getrusage
.long sys_gettimeofday
.long sys_settimeofday
- .long sys_getgroups /* 80 */
+ .long sys_getgroups /* 80 */
.long sys_setgroups
- .long sys_select
+ .long ppc_select
.long sys_symlink
.long sys_lstat
- .long sys_readlink /* 85 */
+ .long sys_readlink /* 85 */
.long sys_uselib
.long sys_swapon
.long sys_reboot
.long old_readdir /* was sys_readdir */
- .long sys_mmap /* 90 */
+ .long sys_mmap /* 90 */
.long sys_munmap
.long sys_truncate
.long sys_ftruncate
.long sys_fchmod
- .long sys_fchown /* 95 */
+ .long sys_fchown /* 95 */
.long sys_getpriority
.long sys_setpriority
.long sys_profil
.long sys_statfs
- .long sys_fstatfs /* 100 */
+ .long sys_fstatfs /* 100 */
.long sys_ioperm
.long sys_socketcall
.long sys_syslog
.long sys_setitimer
- .long sys_getitimer /* 105 */
+ .long sys_getitimer /* 105 */
.long sys_newstat
.long sys_newlstat
.long sys_newfstat
.long sys_uname
- .long sys_iopl /* 110 */
+ .long sys_iopl /* 110 */
.long sys_vhangup
.long sys_idle
.long sys_vm86
.long sys_wait4
- .long sys_swapoff /* 115 */
+ .long sys_swapoff /* 115 */
.long sys_sysinfo
.long sys_ipc
.long sys_fsync
@@ -814,7 +494,7 @@
.long sys_newuname
.long sys_modify_ldt
.long sys_adjtimex
- .long sys_mprotect /* 125 */
+ .long sys_mprotect /* 125 */
.long sys_sigprocmask
.long sys_create_module
.long sys_init_module
@@ -829,9 +509,9 @@
.long 0 /* for afs_syscall */
.long sys_setfsuid
.long sys_setfsgid
- .long sys_llseek /* 140 */
+ .long sys_llseek /* 140 */
.long sys_getdents
- .long sys_newselect
+ .long ppc_select
.long sys_flock
.long sys_msync
.long sys_readv /* 145 */
@@ -844,7 +524,7 @@
.long sys_mlockall
.long sys_munlockall
.long sys_sched_setparam
- .long sys_sched_getparam /* 155 */
+ .long sys_sched_getparam /* 155 */
.long sys_sched_setscheduler
.long sys_sched_getscheduler
.long sys_sched_yield
@@ -853,8 +533,11 @@
.long sys_sched_rr_get_interval
.long sys_nanosleep
.long sys_mremap
- .long SYMBOL_NAME(sys_setresuid)
- .long SYMBOL_NAME(sys_getresuid)
- .long SYMBOL_NAME(sys_nfsservctl)
- .space (NR_syscalls-166)*4
+ .long sys_setresuid
+ .long sys_getresuid /* 165 */
+ .long sys_query_module
+ .long sys_poll
+ .long sys_nfsservctl
+ .long sys_debug
+ .space (NR_syscalls-170)*4