patch-2.4.19 linux-2.4.19/mm/memory.c
Next file: linux-2.4.19/mm/mmap.c
Previous file: linux-2.4.19/mm/highmem.c
Back to the patch index
Back to the overall index
- Lines: 67
- Date: Fri Aug 2 17:39:46 2002
- Orig file: linux-2.4.18/mm/memory.c
- Orig date: Mon Feb 25 11:38:13 2002
diff -urN linux-2.4.18/mm/memory.c linux-2.4.19/mm/memory.c
@@ -44,6 +44,7 @@
#include <linux/iobuf.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
+#include <linux/module.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
@@ -51,6 +52,7 @@
unsigned long max_mapnr;
unsigned long num_physpages;
+unsigned long num_mappedpages;
void * high_memory;
struct page *highmem_start_page;
@@ -144,6 +146,7 @@
void clear_page_tables(struct mm_struct *mm, unsigned long first, int nr)
{
pgd_t * page_dir = mm->pgd;
+ unsigned long last = first + nr;
spin_lock(&mm->page_table_lock);
page_dir += first;
@@ -153,6 +156,8 @@
} while (--nr);
spin_unlock(&mm->page_table_lock);
+ flush_tlb_pgtables(mm, first * PGDIR_SIZE, last * PGDIR_SIZE);
+
/* keep the page table cache within bounds */
check_pgt_cache();
}
@@ -524,6 +529,8 @@
goto out;
}
+EXPORT_SYMBOL(get_user_pages);
+
/*
* Force in an entire range of pages from the current process's user VA,
* and pin them in physical memory.
@@ -1467,3 +1474,24 @@
len, write, 0, NULL, NULL);
return ret == len ? 0 : -1;
}
+
+struct page * vmalloc_to_page(void * vmalloc_addr) /* resolve a kernel virtual (vmalloc) address to its backing struct page, or NULL */
+{
+ unsigned long addr = (unsigned long) vmalloc_addr;
+ struct page *page = NULL; /* result; stays NULL if no present mapping is found */
+ pmd_t *pmd;
+ pte_t *pte;
+ pgd_t *pgd;
+
+ pgd = pgd_offset_k(addr); /* top-level entry in the kernel page tables ("_k" = kernel, not a user mm) */
+ if (!pgd_none(*pgd)) { /* bail out at any unpopulated level of the 3-level walk */
+ pmd = pmd_offset(pgd, addr); /* middle-level table entry */
+ if (!pmd_none(*pmd)) {
+ pte = pte_offset(pmd, addr); /* leaf page-table entry for this address */
+ if (pte_present(*pte)) { /* only a present PTE maps a physical page */
+ page = pte_page(*pte); /* PTE -> struct page */
+ }
+ }
+ }
+ return page;
+}
FUNET's LINUX-ADM group, linux-adm@nic.funet.fi
TCL-scripts by Sam Shen (who was at: slshen@lbl.gov)