patch-2.1.112 linux/kernel/sched.c
- Lines: 65
- Date: Mon Jul 27 22:53:40 1998
- Orig file: v2.1.111/linux/kernel/sched.c
- Orig date: Sun Jul 26 11:57:19 1998
diff -u --recursive --new-file v2.1.111/linux/kernel/sched.c linux/kernel/sched.c
@@ -105,10 +105,19 @@
static inline void reschedule_idle(struct task_struct * p)
{
+
/*
* For SMP, we try to see if the CPU the task used
* to run on is idle..
*/
+#if 0
+ /*
+ * Disable this for now. Ingo has some interesting
+ * code that looks too complex, and I have some ideas,
+ * but in the meantime.. One problem is that "wakeup()"
+ * can be (and is) called before we've even initialized
+ * SMP completely, so..
+ */
#ifdef __SMP__
int want_cpu = p->processor;
@@ -132,6 +141,7 @@
} while (--i > 0);
}
#endif
+#endif
if (p->policy != SCHED_OTHER || p->counter > current->counter + 3)
current->need_resched = 1;
}
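For readers skimming the hunk above: after the new #if 0 block disables the SMP idle-CPU search, the only test left active in reschedule_idle() is its final line. Below is a minimal, self-contained C sketch of that wakeup-preemption test, using stand-in types (the real kernel operates on struct task_struct and its policy/counter fields):

#include <stdio.h>

#define SCHED_OTHER 0

struct task {
	int policy;   /* scheduling class (SCHED_OTHER is timesharing) */
	long counter; /* remaining timeslice, in ticks */
};

/*
 * Mirror of the surviving test in reschedule_idle(): a realtime
 * task always preempts; a timesharing task preempts only when its
 * remaining slice beats the running task's by more than 3 ticks.
 */
static int should_preempt(const struct task *p, const struct task *curr)
{
	return p->policy != SCHED_OTHER || p->counter > curr->counter + 3;
}

int main(void)
{
	struct task woken   = { SCHED_OTHER, 10 };
	struct task running = { SCHED_OTHER, 4 };

	printf("preempt: %d\n", should_preempt(&woken, &running)); /* prints 1 */
	return 0;
}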
@@ -437,16 +447,15 @@
*/
asmlinkage void schedule(void)
{
- int lock_depth;
struct task_struct * prev, * next;
unsigned long timeout;
int this_cpu;
prev = current;
- this_cpu = smp_processor_id();
+ this_cpu = prev->processor;
if (in_interrupt())
goto scheduling_in_interrupt;
- release_kernel_lock(prev, this_cpu, lock_depth);
+ release_kernel_lock(prev, this_cpu);
if (bh_active & bh_mask)
do_bottom_half();
@@ -454,6 +463,7 @@
spin_lock_irq(&runqueue_lock);
/* move an exhausted RR process to be last.. */
+ prev->need_resched = 0;
if (!prev->counter && prev->policy == SCHED_RR) {
prev->counter = prev->priority;
move_last_runqueue(prev);
@@ -551,8 +561,7 @@
* switched into it (from an even more "previous"
* prev)
*/
- prev->need_resched = 0;
- reacquire_kernel_lock(prev, smp_processor_id(), lock_depth);
+ reacquire_kernel_lock(prev);
return;
scheduling_in_interrupt:
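The schedule() hunks leave the SCHED_RR rotation intact: an exhausted round-robin task has its counter refilled from its static priority and is moved to the tail of the runqueue before the next task is picked. The sketch below illustrates that rotation with a toy singly linked list; move_last_runqueue() here is a simplified stand-in for the kernel helper of the same name, not the actual implementation:

#include <stdio.h>
#include <stddef.h>

#define SCHED_RR 2

struct task {
	int policy;
	long counter;      /* remaining ticks in the current slice */
	long priority;     /* static priority, used to refill counter */
	struct task *next;
};

static struct task *runqueue; /* head of a toy singly linked runqueue */

/* Simplified stand-in: unlink p, then append it at the tail. */
static void move_last_runqueue(struct task *p)
{
	struct task **q = &runqueue;

	while (*q && *q != p)
		q = &(*q)->next;
	if (*q)
		*q = p->next;   /* unlink */
	while (*q)
		q = &(*q)->next;
	*q = p;                 /* append at tail */
	p->next = NULL;
}

/* Mirror of the hunk: recharge an exhausted SCHED_RR task and
 * rotate it behind its peers. */
static void requeue_if_exhausted(struct task *prev)
{
	if (!prev->counter && prev->policy == SCHED_RR) {
		prev->counter = prev->priority;
		move_last_runqueue(prev);
	}
}

int main(void)
{
	struct task a = { SCHED_RR, 0, 5, NULL };
	struct task b = { SCHED_RR, 3, 5, NULL };

	a.next = &b;
	runqueue = &a;

	requeue_if_exhausted(&a);
	printf("head=%s, refilled=%ld\n",
	       runqueue == &b ? "b" : "a", a.counter); /* head=b, refilled=5 */
	return 0;
}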