patch-2.4.19 linux-2.4.19/include/asm-ia64/pgtable.h
- Lines: 118
- Date: Fri Aug 2 17:39:45 2002
- Orig file: linux-2.4.18/include/asm-ia64/pgtable.h
- Orig date: Sun Nov 11 10:20:21 2001
diff -urN linux-2.4.18/include/asm-ia64/pgtable.h linux-2.4.19/include/asm-ia64/pgtable.h
@@ -8,7 +8,7 @@
* This hopefully works with any (fixed) IA-64 page-size, as defined
* in <asm/page.h> (currently 8192).
*
- * Copyright (C) 1998-2001 Hewlett-Packard Co
+ * Copyright (C) 1998-2002 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
@@ -108,19 +108,15 @@
/*
* All the normal masks have the "page accessed" bits on, as any time
* they are used, the page is accessed. They are cleared only by the
- * page-out routines. On the other hand, we do NOT turn on the
- * execute bit on pages that are mapped writable. For those pages, we
- * turn on the X bit only when the program attempts to actually
- * execute code in such a page (it's a "lazy execute bit", if you
- * will). This lets reduce the amount of i-cache flushing we have to
- * do for data pages such as stack and heap pages.
+ * page-out routines.
*/
#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_A)
#define PAGE_SHARED __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
#define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
-#define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
+#define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
#define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
#define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
+#define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
# ifndef __ASSEMBLY__
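The comment removed above described the old "lazy execute bit" scheme: writable pages were mapped without X, and the execute right was turned on only when a program first tried to execute from such a page, saving i-cache flushes for stack and heap pages. With that scheme gone, PAGE_COPY carries the execute right up front (_PAGE_AR_RX instead of _PAGE_AR_R), and the new PAGE_KERNELRX provides a privilege-level-0 read-execute protection. For context, generic 2.4 mm code reaches these protections by indexing the __P000..__S111 tables (gathered into protection_map[]) with a VMA's low flag bits; a minimal sketch, assuming the linux/mm.h declarations of this era:

    /*
     * Sketch (not part of this patch): how a VMA's page protection is
     * chosen.  protection_map[] is built from the __P000..__S111 macros
     * in this header; VM_READ=0x1, VM_WRITE=0x2, VM_EXEC=0x4, VM_SHARED=0x8.
     */
    static pgprot_t vma_prot(unsigned long vm_flags)
    {
            return protection_map[vm_flags & 0x0f];
    }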
@@ -152,8 +148,8 @@
#define __S011 PAGE_SHARED
#define __S100 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX)
#define __S101 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
-#define __S110 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
-#define __S111 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
+#define __S110 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)
+#define __S111 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)
#define pgd_ERROR(e) printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
#define pmd_ERROR(e) printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
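__S110 and __S111 cover shared mappings that combine PROT_EXEC with PROT_WRITE (without and with PROT_READ); once the execute right can no longer be granted lazily on first use, RW has to become RWX here as well. A hypothetical userspace illustration of the case these two entries serve (not from the patch):

    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            /* MAP_SHARED with PROT_WRITE|PROT_EXEC resolves to __S110/__S111:
             * the resulting PTE must carry the X access right from the start. */
            size_t len = (size_t) getpagesize();
            void *p = mmap(NULL, len, PROT_READ | PROT_WRITE | PROT_EXEC,
                           MAP_SHARED | MAP_ANONYMOUS, -1, 0);
            if (p == MAP_FAILED) {
                    perror("mmap");
                    return 1;
            }
            munmap(p, len);
            return 0;
    }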
@@ -165,11 +161,6 @@
* addresses:
*/
-/*
- * Given a pointer to an mem_map[] entry, return the kernel virtual
- * address corresponding to that page.
- */
-#define page_address(page) ((page)->virtual)
/* Quick test to see if ADDR is a (potentially) valid physical address. */
static inline long
@@ -178,6 +169,7 @@
return (addr & (local_cpu_data->unimpl_pa_mask)) == 0;
}
+#ifndef CONFIG_DISCONTIGMEM
/*
* kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
* memory. For the return value to be meaningful, ADDR must be >=
@@ -193,6 +185,8 @@
*/
#define kern_addr_valid(addr) (1)
+#endif
+
/*
* Now come the defines and routines to manage and access the three-level
* page table.
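The new #ifndef CONFIG_DISCONTIGMEM guard makes the flat-memory definitions conditional: with discontiguous memory there is no single mem_map[] array, so kern_addr_valid() (and pte_page() below) must come from the discontig support code instead. The stub that simply returns 1 is a heuristic, as the comment above notes; callers use it as a cheap guard before dereferencing a kernel virtual address. A hedged sketch of the usage pattern (the 2.4 /proc/kcore dump code does roughly this):

    /*
     * Sketch: guard a kernel-VA read with kern_addr_valid().  With the
     * flat-memory stub this always takes the first branch; configs with
     * a real implementation skip unmapped ranges instead of faulting.
     */
    if (kern_addr_valid(addr))
            memcpy(buf, (char *) addr, n);
    else
            memset(buf, 0, n);      /* fill unmapped ranges with zeroes */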
@@ -237,8 +231,10 @@
#define pte_none(pte) (!pte_val(pte))
#define pte_present(pte) (pte_val(pte) & (_PAGE_P | _PAGE_PROTNONE))
#define pte_clear(pte) (pte_val(*(pte)) = 0UL)
+#ifndef CONFIG_DISCONTIGMEM
/* pte_page() returns the "struct page *" corresponding to the PTE: */
#define pte_page(pte) (mem_map + (unsigned long) ((pte_val(pte) & _PFN_MASK) >> PAGE_SHIFT))
+#endif
#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_bad(pmd) (!ia64_phys_addr_valid(pmd_val(pmd)))
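pte_page() likewise assumes one contiguous mem_map[]: it extracts the physical frame number from the PTE and uses it as an index into that single array. Spelled out as a function (illustrative only; under CONFIG_DISCONTIGMEM each node has its own map, so this arithmetic is wrong there and the definition moves elsewhere):

    /* Sketch: what the flat-memory pte_page() above computes. */
    static struct page *pte_page_flat(pte_t pte)
    {
            unsigned long pfn = (pte_val(pte) & _PFN_MASK) >> PAGE_SHIFT;
            return mem_map + pfn;   /* index into the single global map */
    }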
@@ -423,22 +419,6 @@
return pte_val(a) == pte_val(b);
}
-/*
- * Macros to check the type of access that triggered a page fault.
- */
-
-static inline int
-is_write_access (int access_type)
-{
- return (access_type & 0x2);
-}
-
-static inline int
-is_exec_access (int access_type)
-{
- return (access_type & 0x4);
-}
-
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init (void);
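The two removed helpers decoded a page fault's access_type word (bit 1 = write, bit 2 = execute); dropping them suggests their remaining callers were reworked in this release to determine the access kind by other means. For reference, their logic was simply:

    /* The removed helpers, open-coded (access_type bit 1 = write,
     * bit 2 = execute): */
    int write_fault = (access_type & 0x2) != 0;   /* was is_write_access() */
    int exec_fault  = (access_type & 0x4) != 0;   /* was is_exec_access() */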
@@ -463,6 +443,11 @@
/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA
+/*
+ * No page table caches to initialise
+ */
+#define pgtable_cache_init() do { } while (0)
+
# endif /* !__ASSEMBLY__ */
/*
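pgtable_cache_init() is a hook the generic boot path began calling unconditionally in this release, for architectures that keep a cache of page-table pages; ia64 has none, so an empty macro satisfies the interface. The do { } while (0) form keeps the no-op safe as a single statement, e.g. under an unbraced if. A sketch of the call site (in start_kernel() of this era; exact placement hedged):

    /* Sketch (hedged): the generic boot code invokes the hook
     * unconditionally, so every architecture must define it. */
    asmlinkage void __init start_kernel(void)
    {
            /* ... */
            mem_init();
            pgtable_cache_init();   /* no-op on ia64, per the stub above */
            /* ... */
    }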