patch-2.1.107 linux/arch/i386/kernel/smp.c
- Lines: 216
- Date: Wed Jun 24 14:30:08 1998
- Orig file: v2.1.106/linux/arch/i386/kernel/smp.c
- Orig date: Tue Jun 23 10:01:19 1998
diff -u --recursive --new-file v2.1.106/linux/arch/i386/kernel/smp.c linux/arch/i386/kernel/smp.c
@@ -64,19 +64,19 @@
/*
* Some notes on processor bugs:
*
- * Pentium and Pentium Pro (and all CPU's) have bugs. The Linux issues
+ * Pentium and Pentium Pro (and all CPUs) have bugs. The Linux issues
* for SMP are handled as follows.
*
* Pentium Pro
* Occasional delivery of 'spurious interrupt' as trap #16. This
- * is very very rare. The kernel logs the event and recovers
+ * is very rare. The kernel logs the event and recovers
*
* Pentium
* There is a marginal case where REP MOVS on 100MHz SMP
* machines with B stepping processors can fail. XXX should provide
* an L1cache=Writethrough or L1cache=off option.
*
- * B stepping CPU's may hang. There are hardware work arounds
+ * B stepping CPUs may hang. There are hardware work arounds
* for this. We warn about it in case your board doesnt have the work
* arounds. Basically thats so I can tell anyone with a B stepping
* CPU and SMP problems "tough".
@@ -116,15 +116,15 @@
static int max_cpus = -1; /* Setup configured maximum number of CPUs to activate */
int smp_found_config=0; /* Have we found an SMP box */
-unsigned long cpu_present_map = 0; /* Bitmask of existing CPU's */
-int smp_num_cpus = 1; /* Total count of live CPU's */
+unsigned long cpu_present_map = 0; /* Bitmask of existing CPUs */
+int smp_num_cpus = 1; /* Total count of live CPUs */
int smp_threads_ready=0; /* Set when the idlers are all forked */
volatile int cpu_number_map[NR_CPUS]; /* which CPU maps to which logical number */
volatile int __cpu_logical_map[NR_CPUS]; /* which logical number maps to which CPU */
volatile unsigned long cpu_callin_map[NR_CPUS] = {0,}; /* We always use 0 the rest is ready for parallel delivery */
volatile unsigned long smp_invalidate_needed; /* Used for the invalidate map that's also checked in the spinlock */
-volatile unsigned long kstack_ptr; /* Stack vector for booting CPU's */
-struct cpuinfo_x86 cpu_data[NR_CPUS]; /* Per cpu bogomips and other parameters */
+volatile unsigned long kstack_ptr; /* Stack vector for booting CPUs */
+struct cpuinfo_x86 cpu_data[NR_CPUS]; /* Per CPU bogomips and other parameters */
static unsigned int num_processors = 1; /* Internal processor count */
static unsigned long io_apic_addr = 0xFEC00000; /* Address of the I/O apic (not yet used) */
unsigned char boot_cpu_id = 0; /* Processor that is doing the boot up */
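[Editorial aside, not part of the patch.] The declarations in this hunk hang together as a small data structure: cpu_present_map is a bitmask of physical APIC ids, and the two map arrays translate between physical ids and dense logical numbers. A minimal user-space sketch of that relationship follows; NR_CPUS, the sample bitmask and the loop are assumptions for illustration only, not the kernel's boot code.

    #include <stdio.h>

    #define NR_CPUS 32

    int main(void)
    {
            unsigned long cpu_present_map = 0x0005UL;  /* CPUs 0 and 2 present */
            int cpu_number_map[NR_CPUS];               /* physical -> logical  */
            int cpu_logical_map[NR_CPUS];              /* logical  -> physical */
            int logical = 0;

            /* Walk the present-map bits and hand out dense logical numbers. */
            for (int phys = 0; phys < NR_CPUS; phys++) {
                    if (!(cpu_present_map & (1UL << phys)))
                            continue;
                    cpu_number_map[phys] = logical;
                    cpu_logical_map[logical] = phys;
                    logical++;
            }

            printf("live CPUs: %d\n", logical);
            for (int l = 0; l < logical; l++) {
                    int phys = cpu_logical_map[l];
                    printf("logical %d <-> physical APIC id %d (number_map[%d] = %d)\n",
                           l, phys, phys, cpu_number_map[phys]);
            }
            return 0;
    }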
@@ -140,7 +140,7 @@
volatile unsigned long kernel_counter=0; /* Number of times the processor holds the lock */
volatile unsigned long syscall_count=0; /* Number of times the processor holds the syscall lock */
-volatile unsigned long ipi_count; /* Number of IPI's delivered */
+volatile unsigned long ipi_count; /* Number of IPIs delivered */
volatile unsigned long smp_proc_in_lock[NR_CPUS] = {0,};/* for computing process time */
volatile int smp_process_available=0;
@@ -396,7 +396,7 @@
}
}
if(apics>1)
- printk("Warning: Multiple APIC's not supported.\n");
+ printk("Warning: Multiple APICs not supported.\n");
return num_processors;
}
@@ -448,7 +448,7 @@
*
* HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK
*
- * It's not just a crazy hack... ;-)
+ * It's not just a crazy hack. ;-)
*/
/*
* Standard page mapping
@@ -614,17 +614,17 @@
/*
* Architecture specific routine called by the kernel just before init is
* fired off. This allows the BP to have everything in order [we hope].
- * At the end of this all the AP's will hit the system scheduling and off
+ * At the end of this all the APs will hit the system scheduling and off
* we go. Each AP will load the system gdt's and jump through the kernel
* init into idle(). At this point the scheduler will one day take over
* and give them jobs to do. smp_callin is a standard routine
- * we use to track CPU's as they power up.
+ * we use to track CPUs as they power up.
*/
__initfunc(void smp_commence(void))
{
/*
- * Lets the callin's below out of their loop.
+ * Lets the callins below out of their loop.
*/
SMP_PRINTK(("Setting commenced=1, go go go\n"));
smp_commenced=1;
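[Editorial aside, not part of the patch.] The commence flag is a simple release gate: the boot processor raises it once setup is done, and the APs spinning in smp_callin fall through into the scheduler. Below is a hedged user-space sketch of that rendezvous pattern; the thread count, names and busy-wait are assumptions, not the kernel's code, and real code would want an atomic or memory barrier rather than bare volatile.

    #include <pthread.h>
    #include <stdio.h>

    static volatile int commenced = 0;

    static void *secondary(void *arg)
    {
            long id = (long)arg;

            /* The AP equivalent of spinning in smp_callin() until released. */
            while (!commenced)
                    ;                       /* busy-wait, as the APs do */

            printf("cpu %ld: commenced, entering idle loop\n", id);
            return NULL;
    }

    int main(void)
    {
            pthread_t ap[3];

            for (long i = 0; i < 3; i++)
                    pthread_create(&ap[i], NULL, secondary, (void *)(i + 1));

            /* ... boot-CPU setup would happen here ... */
            commenced = 1;                  /* let the callins out of their loop */

            for (int i = 0; i < 3; i++)
                    pthread_join(ap[i], NULL);
            return 0;
    }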
@@ -705,7 +705,7 @@
/*
* Everything has been set up for the secondary
- * CPU's - they just need to reload everything
+ * CPUs - they just need to reload everything
* from the task structure
*/
__initfunc(void initialize_secondary(void))
@@ -928,7 +928,7 @@
unsigned int prof_counter[NR_CPUS];
/*
- * Cycle through the processors sending APIC IPI's to boot each.
+ * Cycle through the processors sending APIC IPIs to boot each.
*/
__initfunc(void smp_boot_cpus(void))
@@ -941,7 +941,7 @@
mtrr_init_boot_cpu ();
#endif
/*
- * Initialize the logical to physical cpu number mapping
+ * Initialize the logical to physical CPU number mapping
* and the per-CPU profiling counter/multiplier
*/
@@ -1031,7 +1031,7 @@
setup_APIC_clock ();
/*
- * Now scan the cpu present map and fire up the other CPUs.
+ * Now scan the CPU present map and fire up the other CPUs.
*/
SMP_PRINTK(("CPU map: %lx\n", cpu_present_map));
@@ -1161,16 +1161,16 @@
}
/*
- * A non wait message cannot pass data or cpu source info. This current setup
+ * A non wait message cannot pass data or CPU source info. This current setup
* is only safe because the kernel lock owner is the only person who can send
* a message.
*
* Wrapping this whole block in a spinlock is not the safe answer either. A
- * processor may get stuck with irq's off waiting to send a message and thus
- * not replying to the person spinning for a reply....
+ * processor may get stuck with IRQs off waiting to send a message and thus
+ * not replying to the person spinning for a reply.
*
- * In the end flush tlb ought to be the NMI and a very very short function
- * (to avoid the old IDE disk problems), and other messages sent with IRQ's
+ * In the end flush tlb ought to be the NMI and a very short function
+ * (to avoid the old IDE disk problems), and other messages sent with IRQs
* enabled in a civilised fashion. That will also boost performance.
*/
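[Editorial aside, not part of the patch.] The rule this comment describes - spin for an acknowledgement, but never in a way that stops you answering the other CPU - can be shown with a small two-thread model. Everything below is an assumption-laden user-space sketch, not the kernel's smp_message_pass: the mailbox arrays, function names and atomics are invented for the demo.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    #define NCPUS 2

    static atomic_int pending[NCPUS];      /* 1 = "please flush", per CPU     */
    static atomic_int acked[NCPUS];        /* 1 = request aimed at CPU n done */
    static atomic_int flushes_done;        /* how many senders have finished  */

    static void service_own_mailbox(int me)
    {
            if (atomic_exchange(&pending[me], 0)) {
                    /* ... the local TLB flush would happen here ... */
                    atomic_store(&acked[me], 1);
            }
    }

    static void send_flush(int me, int target)
    {
            atomic_store(&acked[target], 0);
            atomic_store(&pending[target], 1);

            /* Spin for the ack, but keep answering mail sent to us, so two
             * crossing requests cannot wedge each other. */
            while (!atomic_load(&acked[target]))
                    service_own_mailbox(me);
    }

    static void *cpu(void *arg)
    {
            int me = (int)(long)arg;

            send_flush(me, 1 - me);          /* both CPUs flush the other */
            atomic_fetch_add(&flushes_done, 1);

            /* Stay responsive until every sender has been answered. */
            while (atomic_load(&flushes_done) < NCPUS)
                    service_own_mailbox(me);

            printf("cpu %d: crossing flushes completed\n", me);
            return NULL;
    }

    int main(void)
    {
            pthread_t t[NCPUS];

            for (long i = 0; i < NCPUS; i++)
                    pthread_create(&t[i], NULL, cpu, (void *)i);
            for (int i = 0; i < NCPUS; i++)
                    pthread_join(t[i], NULL);
            return 0;
    }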
@@ -1223,15 +1223,15 @@
}
/*
- * Sanity check we don't re-enter this across CPU's. Only the kernel
- * lock holder may send messages. For a STOP_CPU we are bringing the
- * entire box to the fastest halt we can.. A reschedule carries
- * no data and can occur during a flush.. guess what panic
- * I got to notice this bug...
+ * Sanity check we don't re-enter this across CPUs. Only the kernel
+ * lock holder may send messages. For a STOP_CPU we are bringing the
+ * entire box to the fastest halt we can. A reschedule carries
+ * no data and can occur during a flush. Guess what panic
+ * I got to notice this bug.
*/
/*
- * We are busy
+ * We are busy.
*/
smp_cpu_in_msg[p]++;
@@ -1240,7 +1240,7 @@
p, msg, target);*/
/*
- * Wait for the APIC to become ready - this should never occur. Its
+ * Wait for the APIC to become ready - this should never occur. It's
* a debugging check really.
*/
@@ -1327,7 +1327,7 @@
/*
* This is fraught with deadlocks. Linus does a flush tlb at a whim
- * even with IRQ's off. We have to avoid a pair of crossing flushes
+ * even with IRQs off. We have to avoid a pair of crossing flushes
* or we are doomed. See the notes about smp_message_pass.
*/
@@ -1447,7 +1447,7 @@
* we might want to decouple profiling from the 'long path',
* and do the profiling totally in assembly.
*
- * Currently this isnt too much of an issue (performance wise),
+ * Currently this isn't too much of an issue (performance wise),
* we can take more than 100K local irqs per second on a 100 MHz P5.
*/
}
@@ -1617,7 +1617,7 @@
delta = curr_count-prev_count;
/*
- * This limit for delta seems arbitrary, but it isnt, it's
+ * This limit for delta seems arbitrary, but it isn't, it's
* slightly above the level of error a buggy Mercury/Neptune
* chipset timer can cause.
*/
@@ -1670,7 +1670,7 @@
#define LOOPS (HZ/10)
/*
- * let's wait LOOPS wraprounds:
+ * Let's wait LOOPS wraprounds:
*/
for (i=0; i<LOOPS; i++)
wait_8254_wraparound ();
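[Editorial aside, not part of the patch.] The routine these last two hunks touch brackets LOOPS ticks of a known-rate reference (the 8254 wrapping HZ times a second) with two reads of the fast APIC countdown timer and divides; later per-interrupt deltas that stray too far from the result are treated as suspect. The sketch below only models that arithmetic with a simulated counter; the clock rates, values and function names are assumptions, not measurements or kernel API.

    #include <stdio.h>

    #define HZ    100
    #define LOOPS (HZ / 10)          /* measure across a tenth of a second */

    /* Stand-in for wait_8254_wraparound(): here it just advances a model
     * of a down-counting timer by one reference tick. */
    static void wait_reference_tick(unsigned long *fast_counter,
                                    unsigned long fast_hz)
    {
            *fast_counter -= fast_hz / HZ;
    }

    int main(void)
    {
            unsigned long fast_hz = 66000000UL;   /* pretend a 66 MHz bus clock */
            unsigned long counter = 4000000000UL; /* free-running, counts down  */
            unsigned long before, after;

            wait_reference_tick(&counter, fast_hz);   /* align to a tick edge */
            before = counter;

            for (int i = 0; i < LOOPS; i++)           /* wait LOOPS wraprounds */
                    wait_reference_tick(&counter, fast_hz);
            after = counter;

            unsigned long per_jiffy = (before - after) / LOOPS;

            /* A later per-interrupt delta should sit near per_jiffy; anything
             * far outside a small slack (the seemingly arbitrary limit in the
             * hunk above) points at a flaky reference timer. */
            printf("calibrated: %lu timer ticks per jiffy (~%lu Hz bus)\n",
                   per_jiffy, per_jiffy * HZ);
            return 0;
    }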