patch-1.3.38 linux/kernel/sched.c
Next file: linux/mm/filemap.c
Previous file: linux/kernel/ksyms.c
Back to the patch index
Back to the overall index
- Lines: 62
- Date: Mon Oct 30 10:20:26 1995
- Orig file: v1.3.37/linux/kernel/sched.c
- Orig date: Sun Oct 29 11:38:49 1995
diff -u --recursive --new-file v1.3.37/linux/kernel/sched.c linux/kernel/sched.c
@@ -11,7 +11,6 @@
* current-task
*/
-#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/timer.h>
@@ -183,7 +182,7 @@
{
int weight;
-#ifdef CONFIG_SMP
+#ifdef __SMP__
/* We are not permitted to run a task someone else is running */
if (p->processor != NO_PROC_ID)
return -1000;
@@ -199,7 +198,7 @@
weight = p->counter;
if (weight) {
-#ifdef CONFIG_SMP
+#ifdef __SMP__
/* Give a largish advantage to the same processor... */
/* (this is equivalent to penalizing other processors) */
if (p->last_processor == this_cpu)
@@ -261,7 +260,7 @@
p = init_task.next_run;
sti();
-#ifdef CONFIG_SMP
+#ifdef __SMP__
/*
* This is safe as we do not permit re-entry of schedule()
*/
@@ -288,7 +287,7 @@
for_each_task(p)
p->counter = (p->counter >> 1) + p->priority;
}
-#ifdef CONFIG_SMP
+#ifdef __SMP__
/*
* Context switching between two idle threads is pointless.
@@ -519,7 +518,7 @@
(*p)->state == TASK_UNINTERRUPTIBLE ||
(*p)->state == TASK_SWAPPING))
nr += FIXED_1;
-#ifdef CONFIG_SMP
+#ifdef __SMP__
nr-=(smp_num_cpus-1)*FIXED_1;
#endif
return nr;
@@ -978,7 +977,7 @@
*/
int cpu=smp_processor_id();
current_set[cpu]=&init_task;
-#ifdef CONFIG_SMP
+#ifdef __SMP__
init_task.processor=cpu;
#endif
bh_base[TIMER_BH].routine = timer_bh;
FUNET's LINUX-ADM group, linux-adm@nic.funet.fi
TCL-scripts by Sam Shen, slshen@lbl.gov
with Sam's (original) version of this