patch-2.2.8 linux/arch/ppc/kernel/process.c
- Lines: 208
- Date: Tue May 11 08:24:32 1999
- Orig file: v2.2.7/linux/arch/ppc/kernel/process.c
- Orig date: Tue Mar 23 14:35:46 1999
diff -u --recursive --new-file v2.2.7/linux/arch/ppc/kernel/process.c linux/arch/ppc/kernel/process.c
@@ -1,5 +1,5 @@
/*
- * $Id: process.c,v 1.75 1999/02/12 07:06:29 cort Exp $
+ * $Id: process.c,v 1.83 1999/05/10 04:43:43 cort Exp $
*
* linux/arch/ppc/kernel/process.c
*
@@ -43,10 +43,7 @@
#include <asm/prom.h>
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs);
-void switch_to(struct task_struct *, struct task_struct *);
-
extern unsigned long _get_SP(void);
-extern spinlock_t scheduler_lock;
struct task_struct *last_task_used_math = NULL;
static struct vm_area_struct init_mmap = INIT_MMAP;
@@ -77,17 +74,25 @@
int
dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs)
{
-#ifdef __SMP__
- if ( regs->msr & MSR_FP )
- smp_giveup_fpu(current);
-#else
- if (last_task_used_math == current)
- giveup_fpu();
-#endif
+ if (regs->msr & MSR_FP)
+ giveup_fpu(current);
memcpy(fpregs, &current->tss.fpr[0], sizeof(*fpregs));
return 1;
}
+void
+enable_kernel_fp(void)
+{
+#ifdef __SMP__
+ if (current->tss.regs && (current->tss.regs->msr & MSR_FP))
+ giveup_fpu(current);
+ else
+ giveup_fpu(NULL); /* just enables FP for kernel */
+#else
+ giveup_fpu(last_task_used_math);
+#endif /* __SMP__ */
+}
+
/* check to make sure the kernel stack is healthy */
int check_stack(struct task_struct *tsk)
{
@@ -152,7 +157,8 @@
}
void
-switch_to(struct task_struct *prev, struct task_struct *new)
+_switch_to(struct task_struct *prev, struct task_struct *new,
+ struct task_struct **last)
{
struct thread_struct *new_tss, *old_tss;
int s = _disable_interrupts();
@@ -162,10 +168,10 @@
#endif
#ifdef SHOW_TASK_SWITCHES
- printk("%s/%d -> %s/%d NIP %08lx cpu %d lock %x root %x/%x\n",
+ printk("%s/%d -> %s/%d NIP %08lx cpu %d root %x/%x\n",
prev->comm,prev->pid,
new->comm,new->pid,new->tss.regs->nip,new->processor,
- scheduler_lock.lock,new->fs->root,prev->fs->root);
+ new->fs->root,prev->fs->root);
#endif
#ifdef __SMP__
/* avoid complexity of lazy save/restore of fpu
@@ -173,18 +179,19 @@
* this task used the fpu during the last quantum.
*
* If it tries to use the fpu again, it'll trap and
- * reload its fp regs.
+ * reload its fp regs. So we don't have to do a restore
+ * every switch, just a save.
* -- Cort
*/
- if ( prev->tss.regs->msr & MSR_FP )
- smp_giveup_fpu(prev);
+ if (prev->tss.regs && (prev->tss.regs->msr & MSR_FP))
+ giveup_fpu(prev);
prev->last_processor = prev->processor;
current_set[smp_processor_id()] = new;
#endif /* __SMP__ */
new_tss = &new->tss;
old_tss = &current->tss;
- _switch(old_tss, new_tss, new->mm->context);
+ *last = _switch(old_tss, new_tss, new->mm->context);
_enable_interrupts(s);
}
@@ -237,7 +244,12 @@
printk("Instruction DUMP:");
for(i = -3; i < 6; i++)
- printk("%c%08lx%c",i?' ':'<',pc[i],i?' ':'>');
+ {
+ unsigned long p;
+ if (__get_user( p, &pc[i] ))
+ break;
+ printk("%c%08lx%c",i?' ':'<',p,i?' ':'>');
+ }
printk("\n");
}
@@ -265,8 +277,12 @@
copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
struct task_struct * p, struct pt_regs * regs)
{
- struct pt_regs * childregs;
-
+ struct pt_regs * childregs, *kregs;
+#ifdef __SMP__
+ extern void ret_from_smpfork(void);
+#else
+ extern void ret_from_syscall(void);
+#endif
/* Copy registers */
childregs = ((struct pt_regs *)
((unsigned long)p + sizeof(union task_union)
@@ -275,8 +291,19 @@
if ((childregs->msr & MSR_PR) == 0)
childregs->gpr[2] = (unsigned long) p; /* `current' in new task */
childregs->gpr[3] = 0; /* Result from fork() */
+ p->tss.regs = childregs;
p->tss.ksp = (unsigned long) childregs - STACK_FRAME_OVERHEAD;
- p->tss.regs = childregs;
+ p->tss.ksp -= sizeof(struct pt_regs ) + STACK_FRAME_OVERHEAD;
+ kregs = (struct pt_regs *)(p->tss.ksp + STACK_FRAME_OVERHEAD);
+#ifdef __SMP__
+ kregs->nip = (unsigned long)ret_from_smpfork;
+#else
+ kregs->nip = (unsigned long)ret_from_syscall;
+#endif
+ kregs->msr = MSR_KERNEL;
+ kregs->gpr[1] = (unsigned long)childregs - STACK_FRAME_OVERHEAD;
+ kregs->gpr[2] = (unsigned long)p;
+
if (usp >= (unsigned long) regs) {
/* Stack is in kernel space - must adjust */
childregs->gpr[1] = (unsigned long)(childregs + 1);
@@ -290,21 +317,14 @@
* copy fpu info - assume lazy fpu switch now always
* -- Cort
*/
-#ifdef __SMP__
- if ( regs->msr & MSR_FP )
- smp_giveup_fpu(current);
-#else
- if ( last_task_used_math == current )
- giveup_fpu();
-#endif
+ if (regs->msr & MSR_FP)
+ giveup_fpu(current);
memcpy(&p->tss.fpr, &current->tss.fpr, sizeof(p->tss.fpr));
p->tss.fpscr = current->tss.fpscr;
childregs->msr &= ~MSR_FP;
#ifdef __SMP__
- if ( (p->pid != 0) || !(clone_flags & CLONE_PID) )
- p->tss.smp_fork_ret = 1;
p->last_processor = NO_PROC_ID;
#endif /* __SMP__ */
return 0;
@@ -371,11 +391,6 @@
int res;
lock_kernel();
res = do_fork(clone_flags, regs->gpr[1], regs);
- /*
- * only parent returns here, child returns to either
- * syscall_ret_1() or kernel_thread()
- * -- Cort
- */
#ifdef __SMP__
/* When we clone the idle task we keep the same pid but
* the return value of 0 for both causes problems.
@@ -395,7 +410,6 @@
int res;
res = do_fork(SIGCHLD, regs->gpr[1], regs);
- /* only parent returns here */
#ifdef __SMP__
/* When we clone the idle task we keep the same pid but
* the return value of 0 for both causes problems.
@@ -424,13 +438,8 @@
error = PTR_ERR(filename);
if (IS_ERR(filename))
goto out;
-#ifdef __SMP__
- if ( regs->msr & MSR_FP )
- smp_giveup_fpu(current);
-#else
- if ( last_task_used_math == current )
- giveup_fpu();
-#endif
+ if (regs->msr & MSR_FP)
+ giveup_fpu(current);
error = do_execve(filename, (char **) a1, (char **) a2, regs);
putname(filename);
out:
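
A note on the central change above: switch_to() becomes _switch_to(prev, new, last), and the low-level _switch() now returns a value that is stored through *last. On SMP, by the time control comes back on the new task's stack, the task this CPU switched away from is not necessarily the `prev` that stack remembers, so the assembly switch routine reports which task was actually running. Below is a minimal, runnable sketch of that "return the last task" idiom in plain C; the names (struct task, hw_switch, cpu_curr) are illustrative stand-ins, not kernel APIs, and the real _switch() swaps register state in assembly.

  #include <stdio.h>

  struct task { const char *comm; };

  static struct task *cpu_curr;           /* stand-in for the running task */

  /* Low-level switch: make `next` current and report which task was
   * running before -- on SMP that is what the code on the *new*
   * stack needs to know, and it need not equal its saved `prev`. */
  static struct task *hw_switch(struct task *next)
  {
          struct task *was = cpu_curr;
          cpu_curr = next;
          return was;
  }

  static void switch_to(struct task *prev, struct task *next,
                        struct task **last)
  {
          (void)prev;               /* what this stack believes ran last */
          *last = hw_switch(next);  /* what was actually running */
  }

  int main(void)
  {
          struct task a = { "swapper" }, b = { "init" }, *last;

          cpu_curr = &a;
          switch_to(&a, &b, &last);
          printf("switched from %s, now running %s\n",
                 last->comm, cpu_curr->comm);
          return 0;
  }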
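Related to that, copy_thread() now parks a second, kernel-mode pt_regs frame (kregs) below the child's user register frame and points kregs->nip at ret_from_smpfork or ret_from_syscall, so the child's first trip through _switch() unwinds into the right return path instead of relying on the removed smp_fork_ret flag. Here is a runnable sketch of the stack arithmetic only, with illustrative sizes standing in for the real ppc pt_regs, union task_union, and STACK_FRAME_OVERHEAD (the real code also offsets childregs slightly differently):

  #include <stdio.h>

  #define STACK_FRAME_OVERHEAD 16          /* illustrative, not the ppc value */
  struct pt_regs { unsigned long gpr[32], nip, msr; };
  union task_union { char stack[8192]; };  /* task struct + kernel stack */

  int main(void)
  {
          static union task_union child;
          unsigned long top = (unsigned long)&child + sizeof(child);

          /* User-mode register frame sits at the top of the kernel stack. */
          struct pt_regs *childregs = (struct pt_regs *)top - 1;

          /* ksp drops below childregs, then makes room for one more
           * pt_regs plus a stack frame: the fake kernel frame. */
          unsigned long ksp = (unsigned long)childregs - STACK_FRAME_OVERHEAD;
          ksp -= sizeof(struct pt_regs) + STACK_FRAME_OVERHEAD;

          /* kregs is the frame the child's first context switch restores
           * from; the patch points its nip at ret_from_syscall or
           * ret_from_smpfork. */
          struct pt_regs *kregs = (struct pt_regs *)(ksp + STACK_FRAME_OVERHEAD);

          printf("childregs at %p\nkregs     at %p\nksp        = %#lx\n",
                 (void *)childregs, (void *)kregs, ksp);
          return 0;
  }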