patch-2.2.4 linux/arch/sparc64/mm/init.c
- Lines: 770
- Date: Wed Mar 10 16:53:37 1999
- Orig file: v2.2.3/linux/arch/sparc64/mm/init.c
- Orig date: Sun Nov 8 14:02:49 1998
diff -u --recursive --new-file v2.2.3/linux/arch/sparc64/mm/init.c linux/arch/sparc64/mm/init.c
@@ -1,8 +1,8 @@
-/* $Id: init.c,v 1.103 1998/10/20 03:09:12 jj Exp $
+/* $Id: init.c,v 1.124 1999/02/08 07:01:42 ecd Exp $
* arch/sparc64/mm/init.c
*
- * Copyright (C) 1996,1997 David S. Miller (davem@caip.rutgers.edu)
- * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ * Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
#include <linux/config.h>
@@ -23,6 +23,7 @@
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/vaddrs.h>
+#include <asm/dma.h>
/* Turn this off if you suspect some place in some physical memory hole
might get into page tables (something would be broken very much). */
@@ -34,6 +35,8 @@
struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
+unsigned long *sparc64_valid_addr_bitmap;
+
/* Ugly, but necessary... -DaveM */
unsigned long phys_base;
@@ -69,7 +72,7 @@
page->next_hash = NULL;
page->pprev_hash = NULL;
pgd_cache_size -= 2;
- free_page(PAGE_OFFSET + (page->map_nr << PAGE_SHIFT));
+ __free_page(page);
freed++;
if (page2)
page = page2->next_hash;
@@ -160,27 +163,31 @@
static unsigned long dvmaiobase = 0;
static unsigned long dvmaiosz __initdata = 0;
-/* #define E3000_DEBUG */
-
__initfunc(void dvmaio_init(void))
{
- int i;
+ long i;
if (!dvmaiobase) {
for (i = 0; sp_banks[i].num_bytes != 0; i++)
if (sp_banks[i].base_addr + sp_banks[i].num_bytes > dvmaiobase)
dvmaiobase = sp_banks[i].base_addr + sp_banks[i].num_bytes;
+
+ /* We map phys_base directly to phys_base+(4GB-DVMAIO_SIZE). */
+ dvmaiobase -= phys_base;
+
dvmaiobase = (dvmaiobase + DVMAIO_SIZE + 0x400000 - 1) & ~(0x400000 - 1);
for (i = 0; i < 6; i++)
- if (dvmaiobase <= ((1024 * 64 * 1024) << i))
+ if (dvmaiobase <= ((1024L * 64 * 1024) << i))
break;
- dvmaiobase = ((1024 * 64 * 1024) << i) - DVMAIO_SIZE;
+ dvmaiobase = ((1024L * 64 * 1024) << i) - DVMAIO_SIZE;
dvmaiosz = i;
}
}
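
The sizing logic above rounds the top of physical memory (plus the DVMA hole) up to a 4 MB boundary, then picks the smallest supported IOMMU span, 64 MB << i for i in 0..5. The switch to 1024L in the hunk matters: with plain 32-bit int arithmetic, (1024 * 64 * 1024) << 5 is 2^31 and overflows. A standalone model of the computation, using an illustrative DVMAIO_SIZE since the real constant lives in the sparc64 headers:

    #include <stdio.h>

    /* Model of the dvmaio_init() sizing arithmetic. DVMAIO_SIZE is
     * assumed to be 32 MB purely for illustration.
     */
    #define DVMAIO_SIZE (32UL * 1024 * 1024)

    int main(void)
    {
        unsigned long base = 0x1f000000UL; /* hypothetical top of memory */
        long i;

        /* Round up to a 4 MB boundary, DVMA hole included. */
        base = (base + DVMAIO_SIZE + 0x400000 - 1) & ~(0x400000UL - 1);

        /* Smallest span 64MB << i (i = 0..5) that covers it; the L
         * suffix keeps the shift in 64-bit arithmetic.
         */
        for (i = 0; i < 6; i++)
            if (base <= ((1024L * 64 * 1024) << i))
                break;

        printf("span %ld MB, dvmaiobase %#lx, tsb size index %ld\n",
               (((1024L * 64 * 1024) << i) >> 20),
               ((1024L * 64 * 1024) << i) - DVMAIO_SIZE, i);
        return 0;
    }
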
__initfunc(void iommu_init(int iommu_node, struct linux_sbus *sbus))
{
+ extern int this_is_starfire;
+ extern void *starfire_hookup(int);
struct iommu_struct *iommu;
struct sysio_regs *sregs;
struct linux_prom64_registers rprop;
@@ -191,10 +198,6 @@
int err, i, j;
dvmaio_init();
-#ifdef E3000_DEBUG
- prom_printf("\niommu_init: [%x:%p] ",
- iommu_node, sbus);
-#endif
err = prom_getproperty(iommu_node, "reg", (char *)&rprop,
sizeof(rprop));
if(err == -1) {
@@ -203,19 +206,16 @@
}
sregs = (struct sysio_regs *) __va(rprop.phys_addr);
-#ifdef E3000_DEBUG
- prom_printf("sregs[%p]\n");
-#endif
if(!sregs) {
prom_printf("iommu_init: Fatal error, sysio regs not mapped\n");
prom_halt();
}
iommu = kmalloc(sizeof(struct iommu_struct), GFP_ATOMIC);
-
-#ifdef E3000_DEBUG
- prom_printf("iommu_init: iommu[%p] ", iommu);
-#endif
+ if (!iommu) {
+ prom_printf("iommu_init: Fatal error, kmalloc(iommu) failed\n");
+ prom_halt();
+ }
spin_lock_init(&iommu->iommu_lock);
iommu->sysio_regs = sregs;
@@ -224,14 +224,19 @@
control = sregs->iommu_control;
impl = (control & IOMMU_CTRL_IMPL) >> 60;
vers = (control & IOMMU_CTRL_VERS) >> 56;
-#ifdef E3000_DEBUG
- prom_printf("sreg_control[%08x]\n", control);
- prom_printf("IOMMU: IMPL[%x] VERS[%x] SYSIO mapped at %016lx\n",
- (unsigned int) impl, (unsigned int)vers, (unsigned long) sregs);
-#endif
printk("IOMMU(SBUS): IMPL[%x] VERS[%x] SYSIO mapped at %016lx\n",
(unsigned int) impl, (unsigned int)vers, (unsigned long) sregs);
+ /* Streaming buffer is unreliable on VERS 0 of SYSIO;
 * such parts never shipped in production Sun hardware,
 * but I check just to be robust. --DaveM
 */
+ vers = ((sregs->control & SYSIO_CONTROL_VER) >> 56);
+ if (vers == 0)
+ iommu->strbuf_enabled = 0;
+ else
+ iommu->strbuf_enabled = 1;
+
control &= ~(IOMMU_CTRL_TSBSZ);
control |= ((IOMMU_TSBSZ_2K * dvmaiosz) | IOMMU_CTRL_TBWSZ | IOMMU_CTRL_ENAB);
@@ -281,9 +286,14 @@
/* Setup aliased mappings... */
for(i = 0; i < (dvmaiobase >> 16); i++) {
- *iopte = (IOPTE_VALID | IOPTE_64K | IOPTE_STBUF |
- IOPTE_CACHE | IOPTE_WRITE);
- *iopte |= (i << 16);
+ unsigned long val = ((((unsigned long)i) << 16UL) + phys_base);
+
+ val |= IOPTE_VALID | IOPTE_64K | IOPTE_WRITE;
+ if (iommu->strbuf_enabled)
+ val |= IOPTE_STBUF;
+ else
+ val |= IOPTE_CACHE;
+ *iopte = val;
iopte++;
}
@@ -291,38 +301,34 @@
for( ; i < ((dvmaiobase + DVMAIO_SIZE) >> 16); i++)
*iopte++ = 0;
-#ifdef E3000_DEBUG
- prom_printf("IOMMU: pte's mapped, enabling IOMMU... ");
-#endif
sregs->iommu_tsbbase = __pa(tsbbase);
sregs->iommu_control = control;
-#ifdef E3000_DEBUG
- prom_printf("done\n");
-#endif
/* Get the streaming buffer going. */
control = sregs->sbuf_control;
impl = (control & SYSIO_SBUFCTRL_IMPL) >> 60;
vers = (control & SYSIO_SBUFCTRL_REV) >> 56;
-#ifdef E3000_DEBUG
- prom_printf("IOMMU: enabling streaming buffer, control[%08x]... ",
- control);
-#endif
- printk("IOMMU: Streaming Buffer IMPL[%x] REV[%x] ",
+ printk("IOMMU: Streaming Buffer IMPL[%x] REV[%x] ... ",
(unsigned int)impl, (unsigned int)vers);
- iommu->sbuf_flushflag_va = kmalloc(sizeof(unsigned long), GFP_DMA);
- printk("FlushFLAG[%016lx] ... ", (iommu->sbuf_flushflag_pa = __pa(iommu->sbuf_flushflag_va)));
- *(iommu->sbuf_flushflag_va) = 0;
-
- sregs->sbuf_control = (control | SYSIO_SBUFCTRL_SB_EN);
+ iommu->flushflag = 0;
-#ifdef E3000_DEBUG
- prom_printf("done, returning\n");
-#endif
- printk("ENABLED\n");
+ if (iommu->strbuf_enabled != 0) {
+ sregs->sbuf_control = (control | SYSIO_SBUFCTRL_SB_EN);
+ printk("ENABLED\n");
+ } else {
+ sregs->sbuf_control = (control & ~(SYSIO_SBUFCTRL_SB_EN));
+ printk("DISABLED\n");
+ }
/* Finally enable DVMA arbitration for all devices, just in case. */
sregs->sbus_control |= SYSIO_SBCNTRL_AEN;
+
+ /* If necessary, hook us up for starfire IRQ translations. */
+ sbus->upaid = prom_getintdefault(sbus->prom_node, "upa-portid", -1);
+ if(this_is_starfire)
+ sbus->starfire_cookie = starfire_hookup(sbus->upaid);
+ else
+ sbus->starfire_cookie = NULL;
}
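
The rewritten mapping loop above builds one 64 K IOPTE per chunk, now offset by phys_base and selecting the streaming-buffer bit only when the SYSIO revision supports it, falling back to a cacheable mapping otherwise. A sketch of the PTE assembly, with placeholder bit values since the real IOPTE encodings come from the sparc64 IOMMU headers:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative model of the aliased-mapping loop in iommu_init():
     * DVMA chunk i maps to physical (i << 16) + phys_base. These flag
     * values are placeholders, not the real SYSIO encodings.
     */
    #define IOPTE_VALID  (1UL << 63)
    #define IOPTE_64K    (1UL << 61)
    #define IOPTE_STBUF  (1UL << 60)
    #define IOPTE_CACHE  (1UL << 4)
    #define IOPTE_WRITE  (1UL << 1)

    static uint64_t make_iopte(unsigned long i, unsigned long phys_base,
                               int strbuf_enabled)
    {
        uint64_t val = (((uint64_t)i) << 16) + phys_base;

        val |= IOPTE_VALID | IOPTE_64K | IOPTE_WRITE;
        if (strbuf_enabled)
            val |= IOPTE_STBUF;  /* route DMA through the streaming buffer */
        else
            val |= IOPTE_CACHE;  /* cacheable, non-streaming fallback */
        return val;
    }

    int main(void)
    {
        printf("%016llx\n",
               (unsigned long long)make_iopte(3, 0x10000000UL, 1));
        return 0;
    }
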
void mmu_map_dma_area(unsigned long addr, int len, __u32 *dvma_addr,
@@ -397,19 +403,35 @@
__u32 mmu_get_scsi_one(char *vaddr, unsigned long len, struct linux_sbus *sbus)
{
- __u32 sbus_addr = (__u32) __pa(vaddr);
+ struct iommu_struct *iommu = sbus->iommu;
+ struct sysio_regs *sregs = iommu->sysio_regs;
+ unsigned long start = (unsigned long) vaddr;
+ unsigned long end = PAGE_ALIGN(start + len);
+ unsigned long flags, tmp;
+ volatile u64 *sbctrl = (volatile u64 *) &sregs->sbus_control;
-#ifndef DEBUG_IOMMU
- return sbus_addr;
-#else
- if((sbus_addr < dvmaiobase) &&
- ((sbus_addr + len) < dvmaiobase))
- return sbus_addr;
-
- /* "can't happen"... GFP_DMA assures this. */
- panic("Very high scsi_one mappings should never happen.");
- return (__u32)0;
-#endif
+ start &= PAGE_MASK;
+ if (end > MAX_DMA_ADDRESS) {
+ printk("mmu_get_scsi_one: Bogus DMA buffer address [%016lx:%d]\n",
+ (unsigned long) vaddr, (int)len);
+ panic("DMA address too large, tell DaveM");
+ }
+
+ if (iommu->strbuf_enabled) {
+ spin_lock_irqsave(&iommu->iommu_lock, flags);
+ iommu->flushflag = 0;
+ while(start < end) {
+ sregs->sbuf_pflush = start;
+ start += PAGE_SIZE;
+ }
+ sregs->sbuf_fsync = __pa(&(iommu->flushflag));
+ tmp = *sbctrl;
+ while(iommu->flushflag == 0)
+ membar("#LoadLoad");
+ spin_unlock_irqrestore(&iommu->iommu_lock, flags);
+ }
+
+ return sbus_dvma_addr(vaddr);
}
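
mmu_get_scsi_one() now validates the buffer range before translating it: start is truncated to a page boundary, end is rounded up, and anything crossing MAX_DMA_ADDRESS panics rather than silently producing a bad DVMA address. The alignment arithmetic in isolation, assuming sparc64's 8 K pages and a placeholder limit value:

    #include <stdio.h>

    /* Model of the range validation in mmu_get_scsi_one(). The limit
     * is an illustrative stand-in for the kernel's MAX_DMA_ADDRESS.
     */
    #define PAGE_SIZE        8192UL               /* sparc64: 8K pages */
    #define PAGE_MASK        (~(PAGE_SIZE - 1))
    #define PAGE_ALIGN(a)    (((a) + PAGE_SIZE - 1) & PAGE_MASK)
    #define MAX_DMA_ADDRESS  0xfffff80100000000UL /* placeholder */

    static int range_ok(unsigned long vaddr, unsigned long len)
    {
        unsigned long start = vaddr & PAGE_MASK;
        unsigned long end = PAGE_ALIGN(vaddr + len);

        (void)start;  /* start would seed the per-page flush loop */
        return end <= MAX_DMA_ADDRESS;
    }

    int main(void)
    {
        printf("%d\n", range_ok(0xfffff80000010004UL, 0x2000));
        return 0;
    }
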
void mmu_release_scsi_one(u32 vaddr, unsigned long len, struct linux_sbus *sbus)
@@ -418,43 +440,89 @@
struct sysio_regs *sregs = iommu->sysio_regs;
unsigned long start = (unsigned long) vaddr;
unsigned long end = PAGE_ALIGN(start + len);
- unsigned long flags;
- unsigned int *sync_word;
+ unsigned long flags, tmp;
+ volatile u64 *sbctrl = (volatile u64 *) &sregs->sbus_control;
start &= PAGE_MASK;
- spin_lock_irqsave(&iommu->iommu_lock, flags);
+ if (iommu->strbuf_enabled) {
+ spin_lock_irqsave(&iommu->iommu_lock, flags);
- while(start < end) {
- sregs->sbuf_pflush = start;
- start += PAGE_SIZE;
- }
- sync_word = iommu->sbuf_flushflag_va;
- sregs->sbuf_fsync = iommu->sbuf_flushflag_pa;
- membar("#StoreLoad | #MemIssue");
- while((*sync_word & 0x1) == 0)
- membar("#LoadLoad");
- *sync_word = 0;
+ /* 1) Clear the flush flag word */
+ iommu->flushflag = 0;
- spin_unlock_irqrestore(&iommu->iommu_lock, flags);
+ /* 2) Tell the streaming buffer which entries
+ * we want flushed.
+ */
+ while(start < end) {
+ sregs->sbuf_pflush = start;
+ start += PAGE_SIZE;
+ }
+
+ /* 3) Initiate flush sequence. */
+ sregs->sbuf_fsync = __pa(&(iommu->flushflag));
+
+ /* 4) Guarantee completion of all previous writes
+ * by reading SYSIO's SBUS control register.
+ */
+ tmp = *sbctrl;
+
+ /* 5) Wait for flush flag to get set. */
+ while(iommu->flushflag == 0)
+ membar("#LoadLoad");
+
+ spin_unlock_irqrestore(&iommu->iommu_lock, flags);
+ }
}
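
The five commented steps above form the streaming-buffer consistency protocol that replaces the old sbuf_flushflag_va scheme: the flush flag now lives inside iommu_struct, and a read of the SBUS control register stands in for the old membar("#StoreLoad | #MemIssue"). A condensed model of the sequence (the register block, PAGE_SIZE, and membar macro are stand-ins; this compiles, but the wait loop can only terminate when real hardware DMAs a non-zero value into the flag):

    #include <stdint.h>

    /* Condensed model of the 5-step flush in mmu_release_scsi_one().
     * The control-register read in step 4 forces completion of the
     * posted MMIO writes; the device sets *flushflag when drained.
     */
    struct fake_sysio {
        volatile uint64_t sbuf_pflush;
        volatile uint64_t sbuf_fsync;
        volatile uint64_t sbus_control;
    };

    #define PAGE_SIZE 8192UL
    #define membar_loadload() __asm__ __volatile__("" ::: "memory")

    void strbuf_flush(struct fake_sysio *sregs,
                      volatile uint64_t *flushflag,
                      unsigned long pa_of_flag,
                      unsigned long start, unsigned long end)
    {
        uint64_t tmp;

        *flushflag = 0;                  /* 1) clear the flag word      */
        while (start < end) {            /* 2) queue per-page flushes   */
            sregs->sbuf_pflush = start;
            start += PAGE_SIZE;
        }
        sregs->sbuf_fsync = pa_of_flag;  /* 3) initiate flush sequence  */
        tmp = sregs->sbus_control;       /* 4) force write completion   */
        (void)tmp;
        while (*flushflag == 0)          /* 5) wait for the device      */
            membar_loadload();
    }
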
void mmu_get_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus)
{
- while(sz >= 0) {
- __u32 page = (__u32) __pa(((unsigned long) sg[sz].addr));
-#ifndef DEBUG_IOMMU
- sg[sz].dvma_addr = page;
-#else
- if((page < dvmaiobase) &&
- (page + sg[sz].len) < dvmaiobase) {
- sg[sz].dvma_addr = page;
- } else {
- /* "can't happen"... GFP_DMA assures this. */
- panic("scsi_sgl high mappings should never happen.");
+ struct iommu_struct *iommu = sbus->iommu;
+ struct sysio_regs *sregs = iommu->sysio_regs;
+ unsigned long flags, tmp;
+ volatile u64 *sbctrl = (volatile u64 *) &sregs->sbus_control;
+
+ if (iommu->strbuf_enabled) {
+ spin_lock_irqsave(&iommu->iommu_lock, flags);
+ iommu->flushflag = 0;
+
+ while(sz >= 0) {
+ unsigned long start = (unsigned long)sg[sz].addr;
+ unsigned long end = PAGE_ALIGN(start + sg[sz].len);
+
+ if (end > MAX_DMA_ADDRESS) {
+ printk("mmu_get_scsi_sgl: Bogus DMA buffer address "
+ "[%016lx:%d]\n", start, (int) sg[sz].len);
+ panic("DMA address too large, tell DaveM");
+ }
+
+ sg[sz--].dvma_addr = sbus_dvma_addr(start);
+ start &= PAGE_MASK;
+ while(start < end) {
+ sregs->sbuf_pflush = start;
+ start += PAGE_SIZE;
+ }
+ }
+
+ sregs->sbuf_fsync = __pa(&(iommu->flushflag));
+ tmp = *sbctrl;
+ while(iommu->flushflag == 0)
+ membar("#LoadLoad");
+ spin_unlock_irqrestore(&iommu->iommu_lock, flags);
+ } else {
+ /* Just verify the addresses and fill in the
+ * dvma_addr fields in this case.
+ */
+ while(sz >= 0) {
+ unsigned long start = (unsigned long)sg[sz].addr;
+ unsigned long end = PAGE_ALIGN(start + sg[sz].len);
+ if (end > MAX_DMA_ADDRESS) {
+ printk("mmu_get_scsi_sgl: Bogus DMA buffer address "
+ "[%016lx:%d]\n", start, (int) sg[sz].len);
+ panic("DMA address too large, tell DaveM");
+ }
+ sg[sz--].dvma_addr = sbus_dvma_addr(start);
}
-#endif
- sz--;
}
}
@@ -462,30 +530,98 @@
{
struct iommu_struct *iommu = sbus->iommu;
struct sysio_regs *sregs = iommu->sysio_regs;
- unsigned long flags;
- unsigned int *sync_word;
+ volatile u64 *sbctrl = (volatile u64 *) &sregs->sbus_control;
+ unsigned long flags, tmp;
- spin_lock_irqsave(&iommu->iommu_lock, flags);
+ if (iommu->strbuf_enabled) {
+ spin_lock_irqsave(&iommu->iommu_lock, flags);
- while(sz >= 0) {
- unsigned long start = sg[sz].dvma_addr;
- unsigned long end = PAGE_ALIGN(start + sg[sz].len);
+ /* 1) Clear the flush flag word */
+ iommu->flushflag = 0;
- start &= PAGE_MASK;
- while(start < end) {
- sregs->sbuf_pflush = start;
- start += PAGE_SIZE;
+ /* 2) Tell the streaming buffer which entries
+ * we want flushed.
+ */
+ while(sz >= 0) {
+ unsigned long start = sg[sz].dvma_addr;
+ unsigned long end = PAGE_ALIGN(start + sg[sz].len);
+
+ start &= PAGE_MASK;
+ while(start < end) {
+ sregs->sbuf_pflush = start;
+ start += PAGE_SIZE;
+ }
+ sz--;
}
- sz--;
+
+ /* 3) Initiate flush sequence. */
+ sregs->sbuf_fsync = __pa(&(iommu->flushflag));
+
+ /* 4) Guarantee completion of previous writes
+ * by reading SYSIO's SBUS control register.
+ */
+ tmp = *sbctrl;
+
+ /* 5) Wait for flush flag to get set. */
+ while(iommu->flushflag == 0)
+ membar("#LoadLoad");
+
+ spin_unlock_irqrestore(&iommu->iommu_lock, flags);
}
- sync_word = iommu->sbuf_flushflag_va;
- sregs->sbuf_fsync = iommu->sbuf_flushflag_pa;
- membar("#StoreLoad | #MemIssue");
- while((*sync_word & 0x1) == 0)
- membar("#LoadLoad");
- *sync_word = 0;
+}
+
+void mmu_set_sbus64(struct linux_sbus_device *sdev, int bursts)
+{
+ struct linux_sbus *sbus = sdev->my_bus;
+ struct sysio_regs *sregs = sbus->iommu->sysio_regs;
+ int slot = sdev->slot;
+ u64 *cfg, tmp;
+
+ switch(slot) {
+ case 0:
+ cfg = &sregs->sbus_s0cfg;
+ break;
+ case 1:
+ cfg = &sregs->sbus_s1cfg;
+ break;
+ case 2:
+ cfg = &sregs->sbus_s2cfg;
+ break;
+ case 3:
+ cfg = &sregs->sbus_s3cfg;
+ break;
+
+ case 13:
+ cfg = &sregs->sbus_s4cfg;
+ break;
+ case 14:
+ cfg = &sregs->sbus_s5cfg;
+ break;
+ case 15:
+ cfg = &sregs->sbus_s6cfg;
+ break;
+
+ default:
+ return;
+ };
+
+ /* ETM already enabled? If so, we're done. */
+ tmp = *cfg;
+ if ((tmp & SYSIO_SBSCFG_ETM) != 0)
+ return;
- spin_unlock_irqrestore(&iommu->iommu_lock, flags);
+ /* Set burst bits. */
+ if (bursts & DMA_BURST8)
+ tmp |= SYSIO_SBSCFG_BA8;
+ if (bursts & DMA_BURST16)
+ tmp |= SYSIO_SBSCFG_BA16;
+ if (bursts & DMA_BURST32)
+ tmp |= SYSIO_SBSCFG_BA32;
+ if (bursts & DMA_BURST64)
+ tmp |= SYSIO_SBSCFG_BA64;
+
+ /* Finally turn on ETM and set register. */
+ *cfg = (tmp | SYSIO_SBSCFG_ETM);
}
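
mmu_set_sbus64() is new in this patch: it maps an SBUS slot number to its per-slot config register and, unless extended transfer mode (ETM) is already on, ORs in the burst sizes the driver advertises before enabling ETM. The mask translation modeled standalone, with placeholder bit encodings since the real DMA_BURST* and SYSIO_SBSCFG_* values come from the sparc64 headers:

    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder encodings for illustration only. */
    #define DMA_BURST8   0x01
    #define DMA_BURST16  0x02
    #define DMA_BURST32  0x04
    #define DMA_BURST64  0x08

    #define SYSIO_SBSCFG_BA8   (1UL << 1)
    #define SYSIO_SBSCFG_BA16  (1UL << 2)
    #define SYSIO_SBSCFG_BA32  (1UL << 3)
    #define SYSIO_SBSCFG_BA64  (1UL << 4)
    #define SYSIO_SBSCFG_ETM   (1UL << 0)

    static uint64_t sbus64_cfg(uint64_t cfg, int bursts)
    {
        if (cfg & SYSIO_SBSCFG_ETM)  /* ETM already enabled: done */
            return cfg;
        if (bursts & DMA_BURST8)   cfg |= SYSIO_SBSCFG_BA8;
        if (bursts & DMA_BURST16)  cfg |= SYSIO_SBSCFG_BA16;
        if (bursts & DMA_BURST32)  cfg |= SYSIO_SBSCFG_BA32;
        if (bursts & DMA_BURST64)  cfg |= SYSIO_SBSCFG_BA64;
        return cfg | SYSIO_SBSCFG_ETM;
    }

    int main(void)
    {
        printf("%#llx\n", (unsigned long long)
               sbus64_cfg(0, DMA_BURST32 | DMA_BURST64));
        return 0;
    }
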
int mmu_info(char *buf)
@@ -562,16 +698,9 @@
*/
static void __flush_nucleus_vptes(void)
{
- unsigned long pstate;
unsigned long prom_reserved_base = 0xfffffffc00000000UL;
int i;
- __asm__ __volatile__("flushw\n\t"
- "rdpr %%pstate, %0\n\t"
- "wrpr %0, %1, %%pstate"
- : "=r" (pstate)
- : "i" (PSTATE_IE));
-
/* Only DTLB must be checked for VPTE entries. */
for(i = 0; i < 63; i++) {
unsigned long tag = spitfire_get_dtlb_tag(i);
@@ -586,8 +715,6 @@
membar("#Sync");
}
}
- __asm__ __volatile__("wrpr %0, 0, %%pstate"
- : : "r" (pstate));
}
static int prom_ditlb_set = 0;
@@ -600,10 +727,19 @@
void prom_world(int enter)
{
+ unsigned long pstate;
int i;
if (!prom_ditlb_set)
return;
+
+ /* Make sure the following runs atomically. */
+ __asm__ __volatile__("flushw\n\t"
+ "rdpr %%pstate, %0\n\t"
+ "wrpr %0, %1, %%pstate"
+ : "=r" (pstate)
+ : "i" (PSTATE_IE));
+
if (enter) {
/* Kick out nucleus VPTEs. */
__flush_nucleus_vptes();
@@ -648,6 +784,8 @@
}
}
}
+ __asm__ __volatile__("wrpr %0, 0, %%pstate"
+ : : "r" (pstate));
}
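
The interrupt-disable bracket moves from __flush_nucleus_vptes() up into prom_world(), so the entire enter/exit path runs with PSTATE.IE clear. The idiom, lifted from the hunk into self-contained helpers (sparc64-only; flushw spills the register windows first so no window trap can fire while interrupts are off, and PSTATE_IE here mirrors the value in the sparc64 headers):

    #define PSTATE_IE 0x002

    static inline unsigned long pstate_disable_interrupts(void)
    {
        unsigned long pstate;

        __asm__ __volatile__("flushw\n\t"
                             "rdpr %%pstate, %0\n\t"
                             "wrpr %0, %1, %%pstate"
                             : "=r" (pstate)
                             : "i" (PSTATE_IE));
        return pstate;
    }

    static inline void pstate_restore(unsigned long pstate)
    {
        __asm__ __volatile__("wrpr %0, 0, %%pstate"
                             : : "r" (pstate));
    }
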
void inherit_locked_prom_mappings(int save_p)
@@ -680,7 +818,7 @@
unsigned long data;
data = spitfire_get_dtlb_data(i);
- if(data & _PAGE_L) {
+ if((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
unsigned long tag = spitfire_get_dtlb_tag(i);
if(save_p) {
@@ -703,7 +841,7 @@
unsigned long data;
data = spitfire_get_itlb_data(i);
- if(data & _PAGE_L) {
+ if((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
unsigned long tag = spitfire_get_itlb_tag(i);
if(save_p) {
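
Both hunks above tighten the locked-entry test: checking _PAGE_L alone could pick up an entry whose lock bit survived invalidation, so only entries with the valid and locked bits both set are treated as genuine PROM mappings. As a one-line predicate (a sketch; the _PAGE_* values below are placeholders for the sparc64 pgtable definitions):

    #define _PAGE_VALID (1UL << 63)  /* placeholder encoding */
    #define _PAGE_L     (1UL << 6)   /* placeholder encoding */

    /* True only for TLB data words that are both valid and locked. */
    #define is_locked_tlb_entry(data) \
        (((data) & (_PAGE_L | _PAGE_VALID)) == (_PAGE_L | _PAGE_VALID))
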
@@ -867,15 +1005,11 @@
struct pgtable_cache_struct pgt_quicklists;
#endif
-/* XXX Add __GFP_HIGH to these calls to "fool" page allocator
- * XXX so we don't go to swap so quickly... then do the same
- * XXX for get_user_page as well -DaveM
- */
pmd_t *get_pmd_slow(pgd_t *pgd, unsigned long offset)
{
pmd_t *pmd;
- pmd = (pmd_t *) __get_free_page(GFP_DMA|GFP_KERNEL);
+ pmd = (pmd_t *) __get_free_page(GFP_KERNEL);
if(pmd) {
memset(pmd, 0, PAGE_SIZE);
pgd_set(pgd, pmd);
@@ -888,7 +1022,7 @@
{
pte_t *pte;
- pte = (pte_t *) __get_free_page(GFP_DMA|GFP_KERNEL);
+ pte = (pte_t *) __get_free_page(GFP_KERNEL);
if(pte) {
memset(pte, 0, PAGE_SIZE);
pmd_set(pmd, pte);
@@ -997,11 +1131,11 @@
__initfunc(unsigned long
paging_init(unsigned long start_mem, unsigned long end_mem))
{
- extern void setup_tba(void);
extern pmd_t swapper_pmd_dir[1024];
- extern unsigned long irq_init(unsigned long start_mem, unsigned long end_mem);
- extern unsigned int sparc64_vpte_patchme[1];
+ extern unsigned int sparc64_vpte_patchme1[1];
+ extern unsigned int sparc64_vpte_patchme2[1];
unsigned long alias_base = phys_base + PAGE_OFFSET;
+ unsigned long second_alias_page = 0;
unsigned long pt;
unsigned long flags;
unsigned long shift = alias_base - ((unsigned long)&empty_zero_page);
@@ -1026,6 +1160,21 @@
: "r" (TLB_TAG_ACCESS), "r" (alias_base), "r" (pt),
"i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" (61 << 3)
: "memory");
+ if (start_mem >= KERNBASE + 0x340000) {
+ second_alias_page = alias_base + 0x400000;
+ __asm__ __volatile__("
+ stxa %1, [%0] %3
+ stxa %2, [%5] %4
+ membar #Sync
+ flush %%g6
+ nop
+ nop
+ nop"
+ : /* No outputs */
+ : "r" (TLB_TAG_ACCESS), "r" (second_alias_page), "r" (pt + 0x400000),
+ "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" (60 << 3)
+ : "memory");
+ }
__restore_flags(flags);
/* Now set kernel pgd to upper alias so physical page computations
@@ -1038,10 +1187,10 @@
/* Now can init the kernel/bad page tables. */
pgd_set(&swapper_pg_dir[0], swapper_pmd_dir + (shift / sizeof(pgd_t)));
- sparc64_vpte_patchme[0] |= (init_mm.pgd[0] >> 10);
+ sparc64_vpte_patchme1[0] |= (init_mm.pgd[0] >> 10);
+ sparc64_vpte_patchme2[0] |= (init_mm.pgd[0] & 0x3ff);
+ flushi((long)&sparc64_vpte_patchme1[0]);
- start_mem = irq_init(start_mem, end_mem);
-
/* We use mempool to create page tables, therefore adjust it up
* such that __pa() macros etc. work.
*/
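
The single vpte patch site becomes two because the pgd value no longer fits in one instruction's immediate field: sparc64_vpte_patchme1 receives the upper bits (pgd >> 10) and sparc64_vpte_patchme2 the low 10 bits (pgd & 0x3ff), in the style of a %hi/%lo split. Modeled as plain arithmetic (a sketch; the real sites are patched machine instructions, hence the flushi of the patched text):

    #include <assert.h>

    int main(void)
    {
        unsigned long pgd = 0xfffff80000402000UL; /* hypothetical value */
        unsigned long hi = pgd >> 10;    /* -> sparc64_vpte_patchme1 */
        unsigned long lo = pgd & 0x3ff;  /* -> sparc64_vpte_patchme2 */

        /* The two instruction immediates reassemble the original. */
        assert(((hi << 10) | lo) == pgd);
        return 0;
    }
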
@@ -1051,9 +1200,20 @@
allocate_ptable_skeleton(DVMA_VADDR, DVMA_VADDR + 0x4000000);
inherit_prom_mappings();
-
- /* Ok, we can use our TLB miss and window trap handlers safely. */
- setup_tba();
+ /* Ok, we can use our TLB miss and window trap handlers safely.
+ * We need to do a quick peek here to see if we are on StarFire
+ * or not, so setup_tba can setup the IRQ globals correctly (it
+ * needs to get the hard smp processor id correctly).
+ */
+ {
+ extern void setup_tba(int);
+ int is_starfire = prom_finddevice("/ssp-serial");
+ if(is_starfire != 0 && is_starfire != -1)
+ is_starfire = 1;
+ else
+ is_starfire = 0;
+ setup_tba(is_starfire);
+ }
/* Really paranoid. */
flushi((long)&empty_zero_page);
@@ -1064,6 +1224,8 @@
*/
/* We only created DTLB mapping of this stuff. */
spitfire_flush_dtlb_nucleus_page(alias_base);
+ if (second_alias_page)
+ spitfire_flush_dtlb_nucleus_page(second_alias_page);
membar("#Sync");
/* Paranoid */
@@ -1079,10 +1241,6 @@
return device_scan (PAGE_ALIGN (start_mem));
}
-/* XXX Add also PG_Hole flag, set it in the page structs here,
- * XXX remove FREE_UNUSED_MEM_MAP code, and the nfsd file handle
- * problems will all be gone. -DaveM
- */
__initfunc(static void taint_real_pages(unsigned long start_mem, unsigned long end_mem))
{
unsigned long tmp = 0, paddr, endaddr;
@@ -1096,14 +1254,24 @@
if (!sp_banks[tmp].num_bytes) {
mem_map[paddr>>PAGE_SHIFT].flags |= (1<<PG_skip);
mem_map[paddr>>PAGE_SHIFT].next_hash = mem_map + (phys_base >> PAGE_SHIFT);
+ mem_map[(paddr>>PAGE_SHIFT)+1UL].flags |= (1<<PG_skip);
+ mem_map[(paddr>>PAGE_SHIFT)+1UL].next_hash = mem_map + (phys_base >> PAGE_SHIFT);
return;
}
if (sp_banks[tmp].base_addr > paddr) {
- /* Making a one or two pages PG_skip holes is not necessary */
- if (sp_banks[tmp].base_addr - paddr > 2 * PAGE_SIZE) {
+ /* Making a one or two pages PG_skip holes
+ * is not necessary. We add one more because
+ * we must set the PG_skip flag on the first
+ * two mem_map[] entries for the hole. Go and
+ * see the mm/filemap.c:shrink_mmap() loop for
+ * details. -DaveM
+ */
+ if (sp_banks[tmp].base_addr - paddr > 3 * PAGE_SIZE) {
mem_map[paddr>>PAGE_SHIFT].flags |= (1<<PG_skip);
mem_map[paddr>>PAGE_SHIFT].next_hash = mem_map + (sp_banks[tmp].base_addr >> PAGE_SHIFT);
+ mem_map[(paddr>>PAGE_SHIFT)+1UL].flags |= (1<<PG_skip);
+ mem_map[(paddr>>PAGE_SHIFT)+1UL].next_hash = mem_map + (sp_banks[tmp].base_addr >> PAGE_SHIFT);
}
paddr = sp_banks[tmp].base_addr;
}
@@ -1111,7 +1279,8 @@
endaddr = sp_banks[tmp].base_addr + sp_banks[tmp].num_bytes;
while (paddr < endaddr) {
mem_map[paddr>>PAGE_SHIFT].flags &= ~(1<<PG_reserved);
- if (paddr >= dvmaiobase)
+ set_bit(paddr >> 22, sparc64_valid_addr_bitmap);
+ if (paddr >= (MAX_DMA_ADDRESS - PAGE_OFFSET))
mem_map[paddr>>PAGE_SHIFT].flags &= ~(1<<PG_DMA);
paddr += PAGE_SIZE;
}
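
Each set_bit() call above marks one 4 MB physical chunk as valid: paddr >> 22 is the chunk index, which the generic bitop splits into a long index and a bit offset. The indexing, modeled without the kernel's atomic set_bit():

    /* Non-atomic model of set_bit(paddr >> 22, sparc64_valid_addr_bitmap):
     * one bit per 4 MB chunk, 64 bits per unsigned long.
     */
    static void mark_chunk_valid(unsigned long *bitmap, unsigned long paddr)
    {
        unsigned long chunk = paddr >> 22;

        bitmap[chunk >> 6] |= 1UL << (chunk & 63);
    }
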
@@ -1131,6 +1300,12 @@
end_mem &= PAGE_MASK;
max_mapnr = MAP_NR(end_mem);
high_memory = (void *) end_mem;
+
+ sparc64_valid_addr_bitmap = (unsigned long *)start_mem;
+ i = max_mapnr >> ((22 - PAGE_SHIFT) + 6);
+ i += 1;
+ memset(sparc64_valid_addr_bitmap, 0, i << 3);
+ start_mem += i << 3;
start_mem = PAGE_ALIGN(start_mem);
num_physpages = 0;
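
The bitmap sizing above converts page frames to 4 MB chunks (shift by 22 - PAGE_SHIFT), chunks to 64-bit longs (shift by 6 more), adds one long for any remainder, and reserves longs << 3 bytes of start_mem. Checked standalone for a hypothetical 2 GB machine, with PAGE_SHIFT of 13 for sparc64's 8 K pages:

    #include <stdio.h>

    #define PAGE_SHIFT 13  /* sparc64: 8K pages */

    int main(void)
    {
        unsigned long max_mapnr = (2UL * 1024 * 1024 * 1024) >> PAGE_SHIFT;
        unsigned long longs = (max_mapnr >> ((22 - PAGE_SHIFT) + 6)) + 1;

        /* One bit per 4MB chunk, 64 bits per long, 8 bytes per long. */
        printf("bitmap: %lu longs, %lu bytes for %lu pages\n",
               longs, longs << 3, max_mapnr);
        return 0;
    }
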
@@ -1138,6 +1313,8 @@
if (phys_base) {
mem_map[0].flags |= (1<<PG_skip) | (1<<PG_reserved);
mem_map[0].next_hash = mem_map + (phys_base >> PAGE_SHIFT);
+ mem_map[1].flags |= (1<<PG_skip) | (1<<PG_reserved);
+ mem_map[1].next_hash = mem_map + (phys_base >> PAGE_SHIFT);
}
addr = PAGE_OFFSET + phys_base;
@@ -1148,6 +1325,7 @@
else
#endif
mem_map[MAP_NR(addr)].flags |= (1<<PG_reserved);
+ set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap);
addr += PAGE_SIZE;
}
@@ -1159,6 +1337,9 @@
if (PageSkip(page)) {
unsigned long low, high;
+ /* See taint_real_pages() for why this is done. -DaveM */
+ page++;
+
low = PAGE_ALIGN((unsigned long)(page+1));
if (page->next_hash < page)
high = ((unsigned long)end) & PAGE_MASK;
@@ -1255,7 +1436,7 @@
val->totalram = 0;
val->sharedram = 0;
- val->freeram = nr_free_pages << PAGE_SHIFT;
+ val->freeram = ((unsigned long)nr_free_pages) << PAGE_SHIFT;
val->bufferram = buffermem;
for (page = mem_map, end = mem_map + max_mapnr;
page < end; page++) {