patch-2.4.23 linux-2.4.23/include/asm-ppc/pgtable.h
- Lines: 238
- Date: 2003-11-28 10:26:21.000000000 -0800
- Orig file: linux-2.4.22/include/asm-ppc/pgtable.h
- Orig date: 2003-08-25 04:44:44.000000000 -0700
diff -urN linux-2.4.22/include/asm-ppc/pgtable.h linux-2.4.23/include/asm-ppc/pgtable.h
@@ -109,6 +109,13 @@
extern unsigned long va_to_phys(unsigned long address);
extern pte_t *va_to_pte(unsigned long address);
extern unsigned long ioremap_bot, ioremap_base;
+extern unsigned long vmalloc_start;
+
+/* Start and end of the vmalloc area. */
+#define VMALLOC_START vmalloc_start
+#define VMALLOC_END ioremap_bot
+#define VMALLOC_VMADDR(x) ((unsigned long)(x))
+
#endif /* __ASSEMBLY__ */
/*
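A minimal sketch of how the new bounds are meant to be read (the helper below is hypothetical, not part of this patch): vmalloc_start is a variable set up at boot, and the vmalloc area runs from there up to ioremap_bot.

/* Sketch only: hypothetical helper, assuming just the macros added above. */
static inline int sketch_in_vmalloc_area(unsigned long addr)
{
	return addr >= VMALLOC_START && addr < VMALLOC_END;
}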
@@ -154,13 +161,23 @@
* and ITLB, respectively (see "mmu.h" for definitions).
*/
-/* PMD_SHIFT determines the size of the area mapped by the second-level page tables */
-#define PMD_SHIFT 22
+/*
+ * The normal case is that PTEs are 32-bits and we have a 1-page
+ * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages. -- paulus
+ *
+ * For any >32-bit physical address platform, we can use the following
+ * two level page table layout where the pgdir is 8KB and the MS 13 bits
+ * are an index to the second level table. The combined pgdir/pmd first
+ * level has 2048 entries and the second level has 512 64-bit PTE entries.
+ * -Matt
+ */
+/* PMD_SHIFT determines the size of the area mapped by the PTE pages */
+#define PMD_SHIFT (PAGE_SHIFT + PTE_SHIFT)
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
-/* PGDIR_SHIFT determines what a third-level page table entry can map */
-#define PGDIR_SHIFT 22
+/* PGDIR_SHIFT determines what a top-level page table entry can map */
+#define PGDIR_SHIFT PMD_SHIFT
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
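Worked values for the new derivation (PTE_SHIFT is defined elsewhere in this patch; the numbers below are inferred from the comment above, so treat them as an illustration rather than patch content):

/*
 * Assuming PAGE_SHIFT = 12:
 *   32-bit PTEs: PTE_SHIFT = 10 -> PMD_SHIFT = PGDIR_SHIFT = 22,
 *                PMD_SIZE = 4MB, one-page 1024-entry pgdir (the old layout)
 *   64-bit PTEs: PTE_SHIFT = 9  -> PMD_SHIFT = PGDIR_SHIFT = 21,
 *                PMD_SIZE = 2MB, 8KB 2048-entry pgdir, 512 PTEs per page
 */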
@@ -168,9 +185,10 @@
* entries per page directory level: our page-table tree is two-level, so
* we don't really have any PMD directory.
*/
-#define PTRS_PER_PTE 1024
+#define PTRS_PER_PTE (1 << PTE_SHIFT)
#define PTRS_PER_PMD 1
-#define PTRS_PER_PGD 1024
+#define PTRS_PER_PGD (1 << (32 - PGDIR_SHIFT))
+
#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_PGD_NR 0
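As a sketch of how a 32-bit virtual address splits under these definitions (macro names below are illustrative, not from the patch):

/* Illustrative only:
 *   top (32 - PGDIR_SHIFT) bits -> pgdir index
 *   next PTE_SHIFT bits         -> index into the PTE page
 *   low PAGE_SHIFT bits         -> byte offset within the page
 */
#define SKETCH_PGD_IDX(addr)	((addr) >> PGDIR_SHIFT)
#define SKETCH_PTE_IDX(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))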
@@ -178,35 +196,13 @@
#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
#define pte_ERROR(e) \
- printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
+ printk("%s:%d: bad pte "PTE_FMT".\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
/*
- * Just any arbitrary offset to the start of the vmalloc VM area: the
- * current 64MB value just means that there will be a 64MB "hole" after the
- * physical memory until the kernel virtual memory starts. That means that
- * any out-of-bounds memory accesses will hopefully be caught.
- * The vmalloc() routines leaves a hole of 4kB between each vmalloced
- * area for the same reason. ;)
- *
- * We no longer map larger than phys RAM with the BATs so we don't have
- * to worry about the VMALLOC_OFFSET causing problems. We do have to worry
- * about clashes between our early calls to ioremap() that start growing down
- * from ioremap_base being run into the VM area allocations (growing upwards
- * from VMALLOC_START). For this reason we have ioremap_bot to check when
- * we actually run into our mappings setup in the early boot with the VM
- * system. This really does become a problem for machines with good amounts
- * of RAM. -- Cort
- */
-#define VMALLOC_OFFSET (0x1000000) /* 16M */
-#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
-#define VMALLOC_VMADDR(x) ((unsigned long)(x))
-#define VMALLOC_END ioremap_bot
-
-/*
* Bits in a linux-style PTE. These match the bits in the
* (hardware-defined) PowerPC PTE as closely as possible.
*/
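PTE_FMT used by pte_ERROR above is presumably defined next to the PTE type elsewhere in this patch; the idea, as a sketch (the exact format strings below are assumptions, not taken from the patch):

/* Assumption/sketch: a printk format wide enough for the configured PTE size,
 * e.g. something along the lines of
 *   32-bit PTEs:  #define PTE_FMT "%08lx"
 *   64-bit PTEs:  #define PTE_FMT "%016Lx"
 */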
@@ -252,6 +248,44 @@
#define _PAGE_ACCESSED 0x400 /* software: R: page referenced */
#define _PMD_PRESENT PAGE_MASK
+#elif defined(CONFIG_44x)
+/*
+ * Definitions for PPC44x
+ *
+ * Because of the 3 word TLB entries to support 36-bit addressing,
+ * the attributes are difficult to map in such a fashion that they
+ * are easily loaded during exception processing. I decided to
+ * organize the entry so the ERPN is the only portion in the
+ * upper word of the PTE and the attribute bits below are packed
+ * in as sensibly as they can be in the area below a 4KB page size
+ * oriented RPN. This at least makes it easy to load the RPN and
+ * ERPN fields in the TLB. -Matt
+ *
+ * Note that these bits preclude future use of a page size
+ * less than 4KB.
+ */
+#define _PAGE_PRESENT 0x00000001 /* S: PTE valid */
+#define _PAGE_RW 0x00000002 /* S: Write permission */
+#define _PAGE_DIRTY 0x00000004 /* S: Page dirty */
+#define _PAGE_ACCESSED 0x00000008 /* S: Page referenced */
+#define _PAGE_HWWRITE 0x00000010 /* H: Dirty & RW */
+#define _PAGE_HWEXEC 0x00000020 /* H: Execute permission */
+#define _PAGE_USER 0x00000040 /* S: User page */
+#define _PAGE_ENDIAN 0x00000080 /* H: E bit */
+#define _PAGE_GUARDED 0x00000100 /* H: G bit */
+#define _PAGE_COHERENT 0x00000200 /* H: M bit */
+#define _PAGE_FILE 0x00000400 /* S: nonlinear file mapping */
+#define _PAGE_NO_CACHE 0x00000400 /* H: I bit */
+#define _PAGE_WRITETHRU 0x00000800 /* H: W bit */
+
+/* TODO: Add large page lowmem mapping support */
+#define _PMD_PRESENT PAGE_MASK
+#define _PMD_PRESENT_MASK (PAGE_MASK)
+#define _PMD_BAD (~PAGE_MASK)
+
+/* ERPN in a PTE never gets cleared, ignore it */
+#define _PTE_NONE_MASK 0xffffffff00000000ULL
+
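A sketch of how a _PTE_NONE_MASK like this is meant to be consumed by a pte_none()-style test (illustration only, not patch content):

/* Sketch: a PTE whose low word is clear counts as "none" even if stale
 * ERPN bits remain in the upper word, because they fall inside
 * _PTE_NONE_MASK. */
static inline int sketch_pte_none(pte_t pte)
{
	return (pte_val(pte) & ~_PTE_NONE_MASK) == 0;
}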
#elif defined(CONFIG_8xx)
/* Definitions for 8xx embedded chips. */
#define _PAGE_PRESENT 0x0001 /* Page is valid */
@@ -271,6 +305,16 @@
#define _PAGE_DIRTY 0x0200 /* software: page changed */
#define _PAGE_USER 0x0800 /* One of the PP bits, the other is USER&~RW */
+/*
+ * The 8xx TLB miss handler allegedly sets _PAGE_ACCESSED in the PTE
+ * for an address even if _PAGE_PRESENT is not set, as a performance
+ * optimization. This is a bug if you ever want to use swap unless
+ * _PAGE_ACCESSED is 2, which it isn't, or unless you have 8xx-specific
+ * definitions for __swp_entry etc. below, which would be gross.
+ * -- paulus
+ */
+#define _PTE_NONE_MASK _PAGE_ACCESSED
+
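A short worked consequence of the note above (illustration only, not patch content):

/* If the 8xx TLB miss handler touches a not-present PTE, the entry may
 * read back as just _PAGE_ACCESSED.  Folding that bit into _PTE_NONE_MASK
 * keeps such an entry looking empty:
 *     pte_val(pte) == _PAGE_ACCESSED
 *     (pte_val(pte) & ~_PTE_NONE_MASK) == 0   ->  still treated as none
 */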
#else /* CONFIG_6xx */
/* Definitions for 60x, 740/750, etc. */
#define _PAGE_PRESENT 0x001 /* software: pte contains a translation */
@@ -284,21 +328,19 @@
#define _PAGE_ACCESSED 0x100 /* R: page referenced */
#define _PAGE_EXEC 0x200 /* software: i-cache coherency required */
#define _PAGE_RW 0x400 /* software: user write access allowed */
+
+#define _PTE_NONE_MASK _PAGE_HASHPTE
+
#endif
-/* The non-standard PowerPC MMUs, which includes the 4xx and 8xx (and
- * mabe 603e) have TLB miss handlers that unconditionally set the
- * _PAGE_ACCESSED flag as a performance optimization. This causes
- * problems for the page_none() macro, just like the HASHPTE flag does
- * for the standard PowerPC MMUs. Depending upon the MMU configuration,
- * either HASHPTE or ACCESSED will have to be masked to give us a
- * proper pte_none() condition.
+/*
+ * Some bits are only used on some cpu families...
*/
#ifndef _PAGE_HASHPTE
#define _PAGE_HASHPTE 0
-#define _PTE_NONE_MASK _PAGE_ACCESSED
-#else
-#define _PTE_NONE_MASK _PAGE_HASHPTE
+#endif
+#ifndef _PTE_NONE_MASK
+#define _PTE_NONE_MASK 0
#endif
#ifndef _PAGE_SHARED
#define _PAGE_SHARED 0
@@ -324,7 +366,16 @@
#define _PAGE_BASE _PAGE_PRESENT | _PAGE_ACCESSED
#define _PAGE_WRENABLE _PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE
+/*
+ * 44x wants _PAGE_GUARDED on all kernel pages for various reasons.
+ * Allegedly that doesn't hurt performance. -- paulus
+ */
+#ifdef CONFIG_44x
+#define _PAGE_KERNEL _PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED | _PAGE_HWEXEC | _PAGE_GUARDED
+#else
#define _PAGE_KERNEL _PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED | _PAGE_HWEXEC
+#endif
+
#define _PAGE_IO _PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED
#define PAGE_NONE __pgprot(_PAGE_BASE)
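A sketch of how these composites are typically consumed (hypothetical usage, not part of this hunk):

/* Sketch only: protection values an ioremap()-style mapping versus ordinary
 * kernel memory would plausibly use, built from the composites above. */
static pgprot_t sketch_io_prot  = __pgprot(_PAGE_IO);     /* kernel, uncached, guarded */
static pgprot_t sketch_mem_prot = __pgprot(_PAGE_KERNEL); /* cacheable kernel memory   */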
@@ -438,7 +489,7 @@
* and a page entry and page directory to the page they refer to.
*/
-static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
+static inline pte_t mk_pte_phys(phys_addr_t physpage, pgprot_t pgprot)
{
pte_t pte;
pte_val(pte) = physpage | pgprot_val(pgprot);
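A usage sketch of why the argument widens to phys_addr_t (the function and values below are hypothetical):

/* Sketch only: on a 36-bit platform phys_addr_t is wider than unsigned long,
 * so a physical page above 4GB can be turned into a 64-bit PTE directly. */
static inline pte_t sketch_high_pte(void)
{
	phys_addr_t phys = 0x100000000ULL;	/* 4GB: needs the ERPN bits */
	return mk_pte_phys(phys, PAGE_KERNEL);
}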
@@ -463,6 +514,8 @@
*
* pte_update clears and sets bit atomically, and returns
* the old pte value.
+ * The ((unsigned long)(p+1) - 4) hack is to get to the least-significant
+ * 32 bits of the PTE regardless of whether PTEs are 32 or 64 bits.
*/
static inline unsigned long pte_update(pte_t *p, unsigned long clr,
unsigned long set)
@@ -477,7 +530,7 @@
" stwcx. %1,0,%3\n\
bne- 1b"
: "=&r" (old), "=&r" (tmp), "=m" (*p)
- : "r" (p), "r" (clr), "r" (set), "m" (*p)
+ : "r" ((unsigned long)(p+1) - 4), "r" (clr), "r" (set), "m" (*p)
: "cc" );
return old;
}
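The reasoning behind the pointer arithmetic above, spelled out (commentary only, not additional patch content):

/* With 64-bit PTEs, p + 1 is 8 bytes past p, so (unsigned long)(p + 1) - 4
 * is the address of the second 32-bit word -- which on big-endian PPC holds
 * the least-significant half that lwarx/stwcx. operate on.  With 32-bit
 * PTEs the same expression is just (unsigned long)p, so the old behaviour
 * is unchanged. */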
@@ -544,7 +597,8 @@
return (pte_t *) pmd_page(*dir) + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}
-extern pgd_t swapper_pg_dir[1024];
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+
extern void paging_init(void);
/*
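Finally, a sketch of a full two-level walk using the helpers declared in this file (illustrative only; locking and error handling omitted):

/* Sketch only: resolve an address to its PTE slot.  pgd_offset(),
 * pmd_offset() and pte_offset() are the existing helpers; PTRS_PER_PMD == 1,
 * so the pmd step is essentially a pass-through. */
static inline pte_t *sketch_lookup_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pmd_t *pmd = pmd_offset(pgd, addr);
	return pte_offset(pmd, addr);
}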