patch-2.2.8 linux/arch/ppc/kernel/head.S
- Lines: 269
- Date: Tue May 11 08:24:32 1999
- Orig file: v2.2.7/linux/arch/ppc/kernel/head.S
- Orig date: Tue Mar 23 14:35:46 1999
diff -u --recursive --new-file v2.2.7/linux/arch/ppc/kernel/head.S linux/arch/ppc/kernel/head.S
@@ -1,7 +1,7 @@
/*
* arch/ppc/kernel/head.S
*
- * $Id: head.S,v 1.121 1999/03/16 10:40:29 cort Exp $
+ * $Id: head.S,v 1.130 1999/05/09 19:16:43 cort Exp $
*
* PowerPC version
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
@@ -91,7 +91,7 @@
#define tlbia \
li r4,128; \
mtctr r4; \
- lis r4,0xC000; \
+ lis r4,KERNELBASE@h; \
0: tlbie r4; \
addi r4,r4,0x1000; \
bdnz 0b
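The tlbia change above swaps the hard-wired 0xC000 for KERNELBASE@h, so the invalidation loop walks 128 pages starting at the kernel's configured base rather than assuming 0xC0000000. A C rendering of the macro (a sketch: tlbie is a privileged PowerPC instruction, stubbed out here, and the KERNELBASE value shown is only the conventional one):

    #include <stdint.h>
    #include <stdio.h>

    #define KERNELBASE 0xC0000000u        /* assumed value; the point of the
                                             patch is not to hard-code it */

    static void tlbie(uint32_t ea)        /* stand-in for the privileged insn */
    {
        printf("tlbie 0x%08x\n", ea);
    }

    int main(void)
    {
        uint32_t ea = KERNELBASE;         /* lis r4,KERNELBASE@h */
        for (int i = 0; i < 128; i++) {   /* li r4,128; mtctr r4 */
            tlbie(ea);                    /* 0: tlbie r4         */
            ea += 0x1000;                 /* addi r4,r4,0x1000   */
        }                                 /* bdnz 0b             */
        return 0;
    }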
@@ -415,7 +415,7 @@
* this, we leave this much untouched space on the stack on exception
* entry.
*/
-#define STACK_UNDERHEAD 64
+#define STACK_UNDERHEAD 0
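STACK_UNDERHEAD drops from 64 to 0, so exception entry no longer reserves debugger scratch space below the frame. The constant still participates in the kernel-stack-pointer arithmetic in the switch code later in this patch; a sketch of that computation (the INT_FRAME_SIZE value is illustrative, not taken from this patch):

    #define INT_FRAME_SIZE  192   /* illustrative only; defined elsewhere */
    #define STACK_UNDERHEAD 0     /* was 64 before this patch */

    /* addi r4,r1,INT_FRAME_SIZE+STACK_UNDERHEAD -- the KSP saved for the
       outgoing task now sits immediately above the exception frame. */
    static unsigned long saved_ksp(unsigned long r1)
    {
        return r1 + INT_FRAME_SIZE + STACK_UNDERHEAD;
    }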
/*
* Exception entry code. This code runs with address translation
@@ -1495,27 +1495,25 @@
* On SMP we know the fpu is free, since we give it up every
* switch. -- Cort
*/
+ mfmsr r5
+ ori r5,r5,MSR_FP
+ SYNC
+ mtmsr r5 /* enable use of fpu now */
+ SYNC
+/*
+ * For SMP, we don't do lazy FPU switching because it just gets too
+ * horrendously complex, especially when a task switches from one CPU
+ * to another. Instead we call giveup_fpu in switch_to.
+ */
+#ifndef __SMP__
#ifndef CONFIG_APUS
lis r6,-KERNELBASE@h
#else
lis r6,CYBERBASEp@h
lwz r6,0(r6)
#endif
-
addis r3,r6,last_task_used_math@ha
lwz r4,last_task_used_math@l(r3)
- mfmsr r5
- ori r5,r5,MSR_FP
- SYNC
- mtmsr r5 /* enable use of fpu now */
-/*
- * All the saving of last_task_used_math is handled
- * by a switch_to() call to smp_giveup_fpu() in SMP so
- * last_task_used_math is not used.
- * -- Cort
- */
-#ifndef __SMP__
- SYNC
cmpi 0,r4,0
beq 1f
add r4,r4,r6
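The new comment above pins down the policy: on SMP the FPU is given up eagerly at every switch, so last_task_used_math only matters on uniprocessor kernels. A minimal C sketch of the two policies (the task type and helper names are stand-ins, not the kernel's own):

    struct task;                        /* stand-in for task_struct */
    struct task *last_task_used_math;   /* UP only: current FPU owner */

    void giveup_fpu(struct task *tsk);  /* saves FPRs, clears MSR_FP;
                                           rewritten later in this patch */

    static void fpu_switch_policy(struct task *prev)
    {
    #ifdef __SMP__
        /* SMP: always give the FPU up before the task can migrate,
         * so no cross-CPU ownership tracking is needed. */
        giveup_fpu(prev);
    #else
        /* UP: do nothing here.  The next owner's first FP instruction
         * traps to load_up_fpu, which saves last_task_used_math's
         * registers and loads the new task's only when actually used. */
        (void)prev;
    #endif
    }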
@@ -1529,15 +1527,17 @@
li r20,MSR_FP|MSR_FE0|MSR_FE1
andc r4,r4,r20 /* disable FP for previous task */
stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
#endif /* __SMP__ */
-1: ori r23,r23,MSR_FP|MSR_FE0|MSR_FE1 /* enable use of FP after return */
+ /* enable use of FP after return */
+ ori r23,r23,MSR_FP|MSR_FE0|MSR_FE1
mfspr r5,SPRG3 /* current task's TSS (phys) */
lfd fr0,TSS_FPSCR-4(r5)
mtfsf 0xff,fr0
REST_32FPRS(0, r5)
+#ifndef __SMP__
subi r4,r5,TSS
sub r4,r4,r6
-#ifndef __SMP__
stw r4,last_task_used_math@l(r3)
#endif /* __SMP__ */
/* restore registers and return */
@@ -1574,48 +1574,44 @@
.align 4
/*
- * Disable FP for the task which had the FPU previously,
- * and save its floating-point registers in its thread_struct.
+ * giveup_fpu(tsk)
+ * Disable FP for the task given as the argument,
+ * and save the floating-point registers in its thread_struct.
* Enables the FPU for use in the kernel on return.
*/
-/* smp_giveup_fpu() takes an arg to tell it where to save the fpu
- * regs since last_task_used_math can't be trusted (many many race
- * conditions). -- Cort
- */
- .globl smp_giveup_fpu
-smp_giveup_fpu:
- mr r4,r3
- b 12f
.globl giveup_fpu
giveup_fpu:
- lis r3,last_task_used_math@ha
- lwz r4,last_task_used_math@l(r3)
-12:
mfmsr r5
ori r5,r5,MSR_FP
SYNC
mtmsr r5 /* enable use of fpu now */
SYNC
- cmpi 0,r4,0
+ cmpi 0,r3,0
beqlr- /* if no previous owner, done */
- addi r4,r4,TSS /* want TSS of last_task_used_math */
+ addi r3,r3,TSS /* want TSS of task */
+ lwz r5,PT_REGS(r3)
+ cmpi 0,r5,0
+ SAVE_32FPRS(0, r3)
+ mffs fr0
+ stfd fr0,TSS_FPSCR-4(r3)
+ beq 1f
+ lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+ li r3,MSR_FP|MSR_FE0|MSR_FE1
+ andc r4,r4,r3 /* disable FP for previous task */
+ stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
#ifndef __SMP__
li r5,0
- stw r5,last_task_used_math@l(r3)
+ lis r4,last_task_used_math@ha
+ stw r5,last_task_used_math@l(r4)
#endif /* __SMP__ */
- SAVE_32FPRS(0, r4)
- mffs fr0
- stfd fr0,TSS_FPSCR-4(r4)
- lwz r5,PT_REGS(r4)
- lwz r3,_MSR-STACK_FRAME_OVERHEAD(r5)
- li r4,MSR_FP|MSR_FE0|MSR_FE1
- andc r3,r3,r4 /* disable FP for previous task */
- stw r3,_MSR-STACK_FRAME_OVERHEAD(r5)
+ blr
+
#else /* CONFIG_8xx */
.globl giveup_fpu
giveup_fpu:
-#endif /* CONFIG_8xx */
blr
+#endif /* CONFIG_8xx */
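The rewrite above merges smp_giveup_fpu and giveup_fpu into one routine that takes the task explicitly, so nothing needs to trust last_task_used_math under SMP races. What the new assembly does, as a C sketch (struct layout and helper names are stand-ins mirroring the TSS/PT_REGS offsets used above):

    #define MSR_FP  0x2000      /* PowerPC MSR floating-point enable */
    #define MSR_FE0 0x0800      /* FP exception mode bits */
    #define MSR_FE1 0x0100

    struct pt_regs    { unsigned long msr; /* ... */ };
    struct tss_struct { struct pt_regs *regs; double fpr[32]; double fpscr; };
    struct task_struct { struct tss_struct tss; };

    struct task_struct *last_task_used_math;

    static void enable_kernel_fp(void) { /* mfmsr; ori ...,MSR_FP; mtmsr */ }
    static void save_fp_regs(struct tss_struct *t) { (void)t; /* SAVE_32FPRS,
                                                                 mffs, stfd */ }

    void giveup_fpu(struct task_struct *tsk)
    {
        enable_kernel_fp();                  /* enable use of fpu now */
        if (!tsk)                            /* cmpi 0,r3,0; beqlr-   */
            return;
        save_fp_regs(&tsk->tss);             /* dump FPRs + FPSCR into TSS */
        if (tsk->tss.regs)                   /* lwz r5,PT_REGS(r3); beq 1f */
            tsk->tss.regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
    #ifndef __SMP__
        last_task_used_math = 0;             /* UP bookkeeping only */
    #endif
    }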
/*
* This code is jumped to from the startup code to copy
@@ -2049,8 +2045,9 @@
stw r0,GPR0(r1)
lwz r0,0(r1)
stw r0,GPR1(r1)
- SAVE_10GPRS(2, r1)
- SAVE_10GPRS(12, r1)
+ /* r3-r13 are caller saved -- Cort */
+ SAVE_GPR(2, r1)
+ SAVE_8GPRS(14, r1)
SAVE_10GPRS(22, r1)
mflr r20 /* Return to switch caller */
mfmsr r22
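The smaller save set works because, as the new comment says, r3-r13 are volatile (caller-saved) in the PowerPC ABI: no caller expects them to survive the call into the switch code, so only r2 and the non-volatile r14-r31 are stored. Summarized as a C table (a sketch of the convention, not kernel code):

    /* PowerPC SVR4 ABI register classes as relied on by this hunk. */
    struct gpr_class { int first, last; const char *treatment; };

    static const struct gpr_class switch_save_set[] = {
        {  2,  2, "saved: SAVE_GPR(2), kernel use of r2 must survive"   },
        {  3, 13, "not saved: volatile, caller-saved across the call"   },
        { 14, 31, "saved: SAVE_8GPRS(14) + SAVE_10GPRS(22), non-volatile" },
    };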
@@ -2073,6 +2070,8 @@
mtspr SPRG3,r0 /* Update current TSS phys addr */
SYNC
lwz r1,KSP(r4) /* Load new stack pointer */
+ /* save the old current 'last' for return value */
+ mr r3,r2
addi r2,r4,-TSS /* Update current */
#ifndef CONFIG_8xx
/* Set up segment registers for new task */
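The "mr r3,r2" above puts the outgoing task into the PowerPC return-value register before r2 is repointed at the new current, so the switch primitive now returns 'last' to its caller. A C-level sketch of the resulting convention (the prototype and names are hypothetical):

    struct task_struct;

    /* asm entry point; its return value is whatever "mr r3,r2" left in r3 */
    struct task_struct *_switch(struct task_struct *prev,
                                struct task_struct *next);

    static void example_caller(struct task_struct *prev,
                               struct task_struct *next)
    {
        struct task_struct *last = _switch(prev, next);
        /* 'last' is the task this CPU switched away from, captured
           before r2 was updated to the new current. */
        (void)last;
    }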
@@ -2080,39 +2079,62 @@
addis r5,r5,0x6000 /* Set Ks, Ku bits */
li r0,12 /* TASK_SIZE / SEGMENT_SIZE */
mtctr r0
- li r3,0
-3: mtsrin r5,r3
+ li r9,0
+3: mtsrin r5,r9
addi r5,r5,1 /* next VSID */
- addis r3,r3,0x1000 /* address of next segment */
+ addis r9,r9,0x1000 /* address of next segment */
bdnz 3b
#else
/* On the MPC8xx, we place the physical address of the new task
* page directory loaded into the MMU base register, and set the
* ASID compare register with the new "context".
*/
- lwz r3,MM-TSS(r4) /* Get virtual address of mm */
- lwz r3,PGD(r3) /* get new->mm->pgd */
- addis r3,r3,-KERNELBASE@h /* convert to phys addr */
- mtspr M_TWB, r3 /* Update MMU base address */
+ lwz r9,MM-TSS(r4) /* Get virtual address of mm */
+ lwz r9,PGD(r9) /* get new->mm->pgd */
+ addis r9,r9,-KERNELBASE@h /* convert to phys addr */
+ mtspr M_TWB, r9 /* Update MMU base address */
mtspr M_CASID, r5 /* Update context */
tlbia
#endif
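On the 8xx branch above (now using r9 so r3 stays free for that return value), the context switch reprograms the MMU directly: the new task's page-directory physical address goes into M_TWB, its context ID into M_CASID, and the TLB is flushed. The same three steps as a C sketch (the wrapper names stand in for the mtspr/tlbia instructions):

    extern void mtspr_m_twb(unsigned long pgd_phys);   /* stand-ins for  */
    extern void mtspr_m_casid(unsigned long context);  /* mtspr M_TWB /  */
    extern void tlbia(void);                           /* M_CASID, tlbia */

    static void mpc8xx_switch_mm(unsigned long pgd_phys, unsigned long ctx)
    {
        mtspr_m_twb(pgd_phys);    /* MMU tablewalk base = phys of new pgd */
        mtspr_m_casid(ctx);       /* ASID compare value = new context    */
        tlbia();                  /* invalidate stale translations       */
    }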
SYNC
-
-/* FALL THROUGH into int_return */
-#ifdef __SMP__
- /* call schedule_tail if this is the first time for a child process */
- lwz r5,TSS_SMP_FORK_RET(r4)
- cmpi 0,r5,0
- beq+ int_return
- li r3,0
- stw r3,TSS_SMP_FORK_RET(r4)
- bl schedule_tail
-#endif /* __SMP__ */
+2: lwz r9,_MSR(r1) /* Returning to user mode? */
+ andi. r9,r9,MSR_PR
+ beq+ 10f /* if not, don't adjust kernel stack */
+8: addi r4,r1,INT_FRAME_SIZE+STACK_UNDERHEAD /* size of frame */
+ stw r4,TSS+KSP(r2) /* save kernel stack pointer */
+ tophys(r9,r1,r9)
+ mtspr SPRG2,r9 /* phys exception stack pointer */
+10: lwz r2,_CTR(r1)
+ lwz r0,_LINK(r1)
+ mtctr r2
+ mtlr r0
+ lwz r2,_XER(r1)
+ lwz r0,_CCR(r1)
+ mtspr XER,r2
+ mtcrf 0xFF,r0
+ /* r3-r13 are destroyed -- Cort */
+ REST_GPR(14, r1)
+ REST_8GPRS(15, r1)
+ REST_8GPRS(23, r1)
+ REST_GPR(31, r1)
+ lwz r2,_NIP(r1) /* Restore environment */
+ lwz r0,_MSR(r1)
+ mtspr SRR0,r2
+ mtspr SRR1,r0
+ lwz r0,GPR0(r1)
+ lwz r2,GPR2(r1)
+ lwz r1,GPR1(r1)
+ SYNC
+ rfi
/*
* Trap exit.
*/
+#ifdef __SMP__
+ .globl ret_from_smpfork
+ret_from_smpfork:
+ bl schedule_tail
+#endif
.globl ret_from_syscall
ret_from_syscall:
.globl int_return
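The removed lines earlier in this hunk show the old scheme: every context switch loaded a TSS_SMP_FORK_RET flag and conditionally called schedule_tail before falling into int_return. The replacement gives a forked child its own entry point, ret_from_smpfork (added just above), which runs schedule_tail once and then falls through into ret_from_syscall/int_return, so the common switch path pays for no test at all. The idea in C (a sketch; how the fork path aims the child's saved state at ret_from_smpfork happens elsewhere, and the schedule_tail declaration is a stand-in):

    struct task { int smp_fork_ret; };
    extern void schedule_tail(void);   /* stand-in declaration; the asm
                                          leaves the old task in r3 */

    /* Old scheme: every switch tested a per-task flag. */
    static void old_switch_tail(struct task *next)
    {
        if (next->smp_fork_ret) {      /* lwz r5,TSS_SMP_FORK_RET(r4) */
            next->smp_fork_ret = 0;
            schedule_tail();
        }
        /* ...fall through into int_return */
    }

    /* New scheme: only a freshly forked child ever starts here. */
    void ret_from_smpfork(void)
    {
        schedule_tail();               /* then falls into ret_from_syscall */
    }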
@@ -2127,8 +2149,8 @@
lwz r5,_MSR(r1)
and. r5,r5,r4
beq 2f
-3: lis r4,n_lost_interrupts@ha
- lwz r4,n_lost_interrupts@l(r4)
+3: lis r4,ppc_n_lost_interrupts@ha
+ lwz r4,ppc_n_lost_interrupts@l(r4)
cmpi 0,r4,0
beq+ 1f
addi r3,r1,STACK_FRAME_OVERHEAD
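The last hunk renames n_lost_interrupts to ppc_n_lost_interrupts, presumably to namespace the PPC-specific counter; the surrounding logic is untouched: on the way out of an interrupt, if the interrupted context had interrupts enabled and some were deferred, they are replayed before returning. Sketched in C (the MSR_EE mask and the handler name are assumptions; the mask load and the call sit outside the lines shown):

    #define MSR_EE 0x8000                 /* external-interrupt enable */

    struct pt_regs { unsigned long msr; /* ... */ };

    extern unsigned int ppc_n_lost_interrupts;        /* renamed counter */
    extern void handle_lost_interrupts(struct pt_regs *regs);  /* hypothetical
                                          name; the call follows the hunk */

    static void int_return_check(struct pt_regs *regs)
    {
        if (!(regs->msr & MSR_EE))        /* lwz r5,_MSR(r1); and.; beq 2f */
            return;
        if (ppc_n_lost_interrupts)        /* cmpi 0,r4,0; beq+ 1f */
            handle_lost_interrupts(regs); /* addi r3,r1,STACK_FRAME_OVERHEAD */
    }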