patch-2.3.25 linux/include/asm-arm/pgtable.h
- Lines: 386
- Date: Thu Oct 28 10:16:02 1999
- Orig file: v2.3.24/linux/include/asm-arm/pgtable.h
- Orig date: Fri Oct 22 13:21:53 1999
diff -u --recursive --new-file v2.3.24/linux/include/asm-arm/pgtable.h linux/include/asm-arm/pgtable.h
@@ -1,55 +1,155 @@
+/*
+ * linux/include/asm-arm/pgtable.h
+ */
#ifndef _ASMARM_PGTABLE_H
#define _ASMARM_PGTABLE_H
#include <linux/config.h>
-#include <asm/arch/memory.h> /* For TASK_SIZE */
+#include <asm/arch/memory.h>
#include <asm/proc-fns.h>
#include <asm/system.h>
-#include <asm/proc/cache.h>
+
+/*
+ * PMD_SHIFT determines the size of the area a second-level page table can map
+ * PGDIR_SHIFT determines what a third-level page table entry can map
+ */
+#define PMD_SHIFT 20
+#define PGDIR_SHIFT 20
#define LIBRARY_TEXT_START 0x0c000000
-#undef TEST_VERIFY_AREA
+#ifndef __ASSEMBLY__
+extern void __pte_error(const char *file, int line, unsigned long val);
+extern void __pmd_error(const char *file, int line, unsigned long val);
+extern void __pgd_error(const char *file, int line, unsigned long val);
+
+#define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte_val(pte))
+#define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd_val(pmd))
+#define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd_val(pgd))
+#endif /* !__ASSEMBLY__ */
+
+#define PMD_SIZE (1UL << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE-1))
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
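
For reference, both shifts being 20 means each first-level slot covers a 1MB
section and the pmd level is folded away. A standalone sketch of the
arithmetic (the 0xc0000000 TASK_SIZE is an assumed value for illustration,
not taken from this patch):

	#include <stdio.h>

	#define PMD_SHIFT	20
	#define PGDIR_SHIFT	20
	#define PMD_SIZE	(1UL << PMD_SHIFT)	/* 1MB per second-level area */
	#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)	/* 1MB per first-level slot */
	#define TASK_SIZE	0xc0000000UL		/* assumed 3GB user split */

	int main(void)
	{
		/* 3GB of user space in 1MB sections = 3072 user pgd entries */
		printf("PMD_SIZE=%luMB USER_PTRS_PER_PGD=%lu\n",
		       PMD_SIZE >> 20, TASK_SIZE / PGDIR_SIZE);
		return 0;
	}
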
/*
- * BAD_PAGETABLE is used when we need a bogus page-table, while
- * BAD_PAGE is used for a bogus page.
- *
+ * The table below defines the page protection levels that we insert into our
+ * Linux page table version. These get translated into the best that the
+ * architecture can perform. Note that on most ARM hardware:
+ * 1) We cannot do execute protection
+ * 2) If we could do execute protection, then read is implied
+ * 3) write implies read permissions
+ */
+#define __P000 PAGE_NONE
+#define __P001 PAGE_READONLY
+#define __P010 PAGE_COPY
+#define __P011 PAGE_COPY
+#define __P100 PAGE_READONLY
+#define __P101 PAGE_READONLY
+#define __P110 PAGE_COPY
+#define __P111 PAGE_COPY
+
+#define __S000 PAGE_NONE
+#define __S001 PAGE_READONLY
+#define __S010 PAGE_SHARED
+#define __S011 PAGE_SHARED
+#define __S100 PAGE_READONLY
+#define __S101 PAGE_READONLY
+#define __S110 PAGE_SHARED
+#define __S111 PAGE_SHARED
+
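
For illustration, the generic mm layer of this era folds these sixteen macros
into a protection_map[] array indexed by the low vm_flags bits (read, write,
exec, shared); the sketch below is modelled on mm/mmap.c of the period and is
hedged, not quoted from this patch. Note how a private writable mapping lands
on PAGE_COPY: write permission is withheld and restored later by the
copy-on-write fault, since the hardware cannot express it directly.

	pgprot_t protection_map[16] = {
		__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
		__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
	};

	/* e.g. vma->vm_page_prot = protection_map[vm_flags & 0x0f]; */
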
+#ifndef __ASSEMBLY__
+/*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
*/
-extern pte_t __bad_page(void);
-extern pte_t * __bad_pagetable(void);
-extern unsigned long *empty_zero_page;
-
-#define BAD_PAGETABLE __bad_pagetable()
-#define BAD_PAGE __bad_page()
-#define ZERO_PAGE(vaddr) ((unsigned long) empty_zero_page)
-
-/* number of bits that fit into a memory pointer */
-#define BYTES_PER_PTR (sizeof(unsigned long))
-#define BITS_PER_PTR (8*BYTES_PER_PTR)
-
-/* to align the pointer to a pointer address */
-#define PTR_MASK (~(sizeof(void*)-1))
-
-/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
-#define SIZEOF_PTR_LOG2 2
-
-/* to find an entry in a page-table */
-#define PAGE_PTR(address) \
- ((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)
+extern struct page *empty_zero_page;
+#define ZERO_PAGE(vaddr) (empty_zero_page)
+
+/*
+ * Handling allocation failures during page table setup.
+ */
+extern void __handle_bad_pmd(pmd_t *pmd);
+extern void __handle_bad_pmd_kernel(pmd_t *pmd);
+
+#define pte_none(pte) (!pte_val(pte))
+#define pte_clear(ptep) set_pte((ptep), __pte(0))
+#define pte_pagenr(pte) ((unsigned long)(((pte_val(pte) - PHYS_OFFSET) >> PAGE_SHIFT)))
+
+#define pmd_none(pmd) (!pmd_val(pmd))
+#define pmd_clear(pmdp) set_pmd(pmdp, __pmd(0))
+
+/*
+ * Permanent address of a page.
+ */
+#define page_address(page) (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))
+#define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT))
+#define pte_page(x) (mem_map + pte_pagenr(x))
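
A worked round trip through these three macros, using a hypothetical pte at
physical address 0x10042000 on a machine whose RAM starts at PHYS_OFFSET
0x10000000 (4k pages, so PAGE_SHIFT is 12):

	pte_pagenr             = (0x10042000 - 0x10000000) >> 12 = 0x42
	pte_page               = mem_map + 0x42
	page_address(pte_page) = PAGE_OFFSET + (0x42 << 12)
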
+
+/*
+ * The "pgd_xxx()" functions here are trivial for a folded two-level
+ * setup: the pgd is never bad, and a pmd always exists (as it's folded
+ * into the pgd entry)
+ */
+#define pgd_none(pgd) (0)
+#define pgd_bad(pgd) (0)
+#define pgd_present(pgd) (1)
+#define pgd_clear(pgdp)
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+extern __inline__ pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
+{
+ pte_t pte;
+ pte_val(pte) = physpage | pgprot_val(pgprot);
+ return pte;
+}
-extern void __bad_pmd(pmd_t *pmd);
-extern void __bad_pmd_kernel(pmd_t *pmd);
+extern __inline__ pte_t mk_pte(struct page *page, pgprot_t pgprot)
+{
+ pte_t pte;
+ pte_val(pte) = (PHYS_OFFSET + ((page - mem_map) << PAGE_SHIFT)) | pgprot_val(pgprot);
+ return pte;
+}
+
+#define page_pte_prot(page,prot) mk_pte(page, prot)
+#define page_pte(page) mk_pte(page, __pgprot(0))
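
A hedged usage sketch of the two constructors; the addresses are made up for
illustration and the protection values are assumed to come from the
per-processor pgtable headers:

	pte_t a = mk_pte_phys(0x10042000UL, PAGE_KERNEL);  /* hypothetical phys addr */
	pte_t b = mk_pte(mem_map + 0x42, PAGE_READONLY);   /* same frame via mem_map */

Because mk_pte() adds PHYS_OFFSET to the mem_map index, the two forms name
the same physical page.
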
+
+/* to find an entry in a page-table-directory */
+#define __pgd_offset(addr) ((addr) >> PGDIR_SHIFT)
+
+#define pgd_offset(mm, addr) ((mm)->pgd+__pgd_offset(addr))
+
+/* to find an entry in a kernel page-table-directory */
+#define pgd_offset_k(addr) pgd_offset(&init_mm, addr)
+
+/* Find an entry in the second-level page table.. */
+#define pmd_offset(dir, addr) ((pmd_t *)(dir))
+
+/* Find an entry in the third-level page table.. */
+#define __pte_offset(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_offset(dir, addr) ((pte_t *)pmd_page(*(dir)) + __pte_offset(addr))
+
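
Putting the offset macros together, a minimal lookup helper shows how a
virtual address walks the folded two-level table; the name lookup_pte is
made up for illustration, and error handling is reduced to the bare checks:

	static pte_t *lookup_pte(struct mm_struct *mm, unsigned long addr)
	{
		pgd_t *pgd = pgd_offset(mm, addr);	/* first-level slot */
		pmd_t *pmd = pmd_offset(pgd, addr);	/* same slot: pmd is folded */

		if (pmd_none(*pmd) || pmd_bad(*pmd))
			return NULL;
		return pte_offset(pmd, addr);		/* third-level entry */
	}
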
+/*
+ * Get the cache handling stuff now.
+ */
+#include <asm/proc/cache.h>
/*
* Page table cache stuff
*/
#ifndef CONFIG_NO_PGT_CACHE
-#ifndef __SMP__
+#ifdef __SMP__
+#error Pgtable caches have to be per-CPU, so that no locking is needed.
+#endif /* __SMP__ */
+
extern struct pgtable_cache_struct {
unsigned long *pgd_cache;
unsigned long *pte_cache;
@@ -61,10 +161,6 @@
#define pte_quicklist (quicklists.pte_cache)
#define pgtable_cache_size (quicklists.pgtable_cache_sz)
-#else /* __SMP__ */
-#error Pgtable caches have to be per-CPU, so that no locking is needed.
-#endif /* __SMP__ */
-
/* used for quicklists */
#define __pgd_next(pgd) (((unsigned long *)pgd)[1])
#define __pte_next(pte) (((unsigned long *)pte)[0])
@@ -73,7 +169,7 @@
{
unsigned long *ret;
- if((ret = pgd_quicklist) != NULL) {
+ if ((ret = pgd_quicklist) != NULL) {
pgd_quicklist = (unsigned long *)__pgd_next(ret);
ret[1] = ret[2];
clean_cache_area(ret + 1, 4);
@@ -82,10 +178,18 @@
return (pgd_t *)ret;
}
+extern __inline__ void free_pgd_fast(pgd_t *pgd)
+{
+ __pgd_next(pgd) = (unsigned long) pgd_quicklist;
+ pgd_quicklist = (unsigned long *) pgd;
+ pgtable_cache_size++;
+}
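
The quicklists here are intrusive LIFO free lists: the link pointer lives
inside the free object itself, in word [1] for pgds and word [0] for ptes
(see __pgd_next/__pte_next above). The pattern in isolation, as a generic
sketch with made-up names:

	static unsigned long *freelist;		/* illustration only */

	static void push(unsigned long *obj)
	{
		obj[0] = (unsigned long) freelist;	/* link through the object */
		freelist = obj;
	}

	static unsigned long *pop(void)
	{
		unsigned long *obj = freelist;
		if (obj)
			freelist = (unsigned long *) obj[0];
		return obj;
	}
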
+
/* We don't use pmd cache, so this is a dummy routine */
-extern __inline__ pmd_t *get_pmd_fast(void)
+#define get_pmd_fast() ((pmd_t *)0)
+
+extern __inline__ void free_pmd_fast(pmd_t *pmd)
{
- return (pmd_t *)0;
}
extern __inline__ pte_t *get_pte_fast(void)
@@ -101,17 +205,6 @@
return (pte_t *)ret;
}
-extern __inline__ void free_pgd_fast(pgd_t *pgd)
-{
- __pgd_next(pgd) = (unsigned long) pgd_quicklist;
- pgd_quicklist = (unsigned long *) pgd;
- pgtable_cache_size++;
-}
-
-extern __inline__ void free_pmd_fast(pmd_t *pmd)
-{
-}
-
extern __inline__ void free_pte_fast(pte_t *pte)
{
__pte_next(pte) = (unsigned long) pte_quicklist;
@@ -121,9 +214,13 @@
#else /* CONFIG_NO_PGT_CACHE */
-#define get_pgd_fast() (NULL)
-#define get_pmd_fast() (NULL)
-#define get_pte_fast() (NULL)
+#define pgd_quicklist ((unsigned long *)0)
+#define pmd_quicklist ((unsigned long *)0)
+#define pte_quicklist ((unsigned long *)0)
+
+#define get_pgd_fast() ((pgd_t *)0)
+#define get_pmd_fast() ((pmd_t *)0)
+#define get_pte_fast() ((pte_t *)0)
#define free_pgd_fast(pgd) free_pgd_slow(pgd)
#define free_pmd_fast(pmd) free_pmd_slow(pmd)
@@ -131,8 +228,93 @@
#endif /* CONFIG_NO_PGT_CACHE */
+extern pgd_t *get_pgd_slow(void);
+extern void free_pgd_slow(pgd_t *pgd);
+
+#define free_pmd_slow(pmd) do { } while (0)
+
+extern pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long addr_preadjusted);
+extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long addr_preadjusted);
+extern void free_pte_slow(pte_t *pte);
+
#include <asm/proc/pgtable.h>
+extern __inline__ pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
+ return pte;
+}
+
+/*
+ * Allocate and free page tables. The xxx_kernel() versions are
+ * used to allocate a kernel page table - this turns on ASN bits
+ * if any.
+ */
+#define pte_free_kernel(pte) free_pte_fast(pte)
+#define pte_free(pte) free_pte_fast(pte)
+
+#ifndef pte_alloc_kernel
+extern __inline__ pte_t * pte_alloc_kernel(pmd_t *pmd, unsigned long address)
+{
+ address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
+ if (pmd_none(*pmd)) {
+ pte_t *page = (pte_t *) get_pte_fast();
+
+ if (!page)
+ return get_pte_kernel_slow(pmd, address);
+ set_pmd(pmd, mk_kernel_pmd(page));
+ return page + address;
+ }
+ if (pmd_bad(*pmd)) {
+ __handle_bad_pmd_kernel(pmd);
+ return NULL;
+ }
+ return (pte_t *) pmd_page(*pmd) + address;
+}
+#endif
+
+extern __inline__ pte_t *pte_alloc(pmd_t * pmd, unsigned long address)
+{
+ address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
+ if (pmd_none(*pmd)) {
+ pte_t *page = (pte_t *) get_pte_fast();
+
+ if (!page)
+ return get_pte_slow(pmd, address);
+ set_pmd(pmd, mk_user_pmd(page));
+ return page + address;
+ }
+ if (pmd_bad(*pmd)) {
+ __handle_bad_pmd(pmd);
+ return NULL;
+ }
+ return (pte_t *) pmd_page(*pmd) + address;
+}
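
A hedged sketch of how fault-handling code of this era drives these
allocators in sequence (error handling elided):

	pgd_t *pgd = pgd_offset(mm, address);
	pmd_t *pmd = pmd_alloc(pgd, address);	/* trivial: returns the pgd slot */

	if (pmd) {
		pte_t *pte = pte_alloc(pmd, address);
		/* pte now points at the entry for 'address', or is NULL */
	}
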
+
+#define pmd_free_kernel pmd_free
+#define pmd_free(pmd) do { } while (0)
+
+#define pmd_alloc_kernel pmd_alloc
+extern __inline__ pmd_t *pmd_alloc(pgd_t *pgd, unsigned long address)
+{
+ return (pmd_t *) pgd;
+}
+
+#define pgd_free(pgd) free_pgd_fast(pgd)
+
+extern __inline__ pgd_t *pgd_alloc(void)
+{
+ pgd_t *pgd;
+
+ pgd = get_pgd_fast();
+ if (!pgd)
+ pgd = get_pgd_slow();
+
+ return pgd;
+}
+
+extern int do_check_pgt_cache(int, int);
+
extern __inline__ void set_pgdir(unsigned long address, pgd_t entry)
{
struct task_struct * p;
@@ -159,15 +341,16 @@
#define update_mmu_cache(vma,address,pte)
-#define SWP_TYPE(entry) (((entry) >> 2) & 0x7f)
-#define SWP_OFFSET(entry) ((entry) >> 9)
-#define SWP_ENTRY(type,offset) (((type) << 2) | ((offset) << 9))
+/*
+ * We support up to 32GB of swap on 4k machines
+ */
+#define SWP_TYPE(entry) (((pte_val(entry)) >> 2) & 0x7f)
+#define SWP_OFFSET(entry) ((pte_val(entry)) >> 9)
+#define SWP_ENTRY(type,offset) __pte((((type) << 2) | ((offset) << 9)))
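
The 32GB figure follows directly from the encoding: in a 32-bit pte the
offset occupies bits 31..9 and the type bits 8..2, so

	offset: 32 - 9 = 23 bits     ->  2^23 pages * 4k/page = 32GB of swap
	type:   8 - 2 + 1 = 7 bits   ->  up to 128 swap areas (mask 0x7f)
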
#define module_map vmalloc
#define module_unmap vfree
-extern int do_check_pgt_cache(int, int);
-
/*
* We rely on GCC optimising this code away for
* architectures which it doesn't apply to. Note
@@ -189,5 +372,7 @@
#define kern_addr_valid(addr) (!machine_is_riscpc() || __kern_valid_addr(addr))
#define io_remap_page_range remap_page_range
+
+#endif /* !__ASSEMBLY__ */
#endif /* _ASMARM_PGTABLE_H */