patch-2.3.26 linux/mm/memory.c
- Lines: 78
- Date: Thu Nov 4 19:36:33 1999
- Orig file: v2.3.25/linux/mm/memory.c
- Orig date: Mon Nov 1 13:56:27 1999
diff -u --recursive --new-file v2.3.25/linux/mm/memory.c linux/mm/memory.c
@@ -58,9 +58,9 @@
* a common occurrence (no need to read the page to know
* that it's zero - better for the cache and memory subsystem).
*/
-static inline void copy_cow_page(struct page * from, struct page * to)
+static inline void copy_cow_page(struct page * from, struct page * to, unsigned long address)
{
- if (from == ZERO_PAGE(to)) {
+ if (from == ZERO_PAGE(address)) {
clear_highpage(to);
return;
}
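
This hunk threads the faulting user virtual address into copy_cow_page(). ZERO_PAGE() takes a virtual address: most ports ignore the argument, but ports with virtually indexed caches can keep several cache-colored zero pages and use the address to pick the right one, and the old code was passing a struct page pointer where a user address belongs. A minimal userspace model of that color selection; PAGE_SHIFT matches the kernel's 4 KB pages, but ZERO_COLORS and zero_page_color() are illustrative names, not the kernel's:

    #include <stdio.h>

    #define PAGE_SHIFT  12
    #define ZERO_COLORS 4   /* assumed number of cache colors */

    /* Pick the zero page whose cache color matches the user virtual
     * address, so reads through the user mapping hit the lines a
     * virtually indexed cache expects. */
    static unsigned int zero_page_color(unsigned long vaddr)
    {
        return (vaddr >> PAGE_SHIFT) & (ZERO_COLORS - 1);
    }

    int main(void)
    {
        unsigned long addrs[] = { 0x1000, 0x2000, 0x5000, 0x6000 };
        for (int i = 0; i < 4; i++)
            printf("vaddr 0x%lx -> zero page #%u\n",
                   addrs[i], zero_page_color(addrs[i]));
        return 0;
    }
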
@@ -416,11 +416,11 @@
* Given a physical address, is there a useful struct page pointing to it?
*/
-struct page * get_page_map(struct page *page)
+struct page * get_page_map(struct page *page, unsigned long vaddr)
{
if (MAP_NR(page) >= max_mapnr)
return 0;
- if (page == ZERO_PAGE(page))
+ if (page == ZERO_PAGE(vaddr))
return 0;
if (PageReserved(page))
return 0;
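
The same address argument is added to get_page_map(), which the kiobuf code uses to decide whether a looked-up page may be pinned: pages beyond max_mapnr, the (possibly per-address) zero page, and reserved pages are all rejected. A userspace model of those three checks; mem_map, zero_page() and the reserved flag here are stand-ins for the kernel's mem_map, ZERO_PAGE() and PageReserved():

    #include <stddef.h>
    #include <stdio.h>

    #define NPAGES 16

    struct page { int reserved; };

    static struct page mem_map[NPAGES];     /* pretend physical page array */

    /* Stand-in for ZERO_PAGE(vaddr); a cache-coloring port would
     * index by vaddr instead of always returning page 0. */
    static struct page *zero_page(unsigned long vaddr)
    {
        (void)vaddr;
        return &mem_map[0];
    }

    /* Return the page only if it is safe to pin and touch. */
    static struct page *get_page_map(struct page *page, unsigned long vaddr)
    {
        if (page < mem_map || page >= mem_map + NPAGES) /* MAP_NR >= max_mapnr */
            return NULL;
        if (page == zero_page(vaddr))   /* never pin the shared zero page */
            return NULL;
        if (page->reserved)             /* PageReserved() */
            return NULL;
        return page;
    }

    int main(void)
    {
        mem_map[2].reserved = 1;
        printf("zero: %p reserved: %p ok: %p\n",
               (void *)get_page_map(&mem_map[0], 0x1000),
               (void *)get_page_map(&mem_map[2], 0x1000),
               (void *)get_page_map(&mem_map[3], 0x1000));
        return 0;
    }
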
@@ -484,7 +484,7 @@
dprintk (KERN_ERR "Missing page in map_user_kiobuf\n");
goto retry;
}
- map = get_page_map(map);
+ map = get_page_map(map, ptr);
if (map) {
if (TryLockPage(map)) {
goto retry;
@@ -743,7 +743,7 @@
return 0;
}
flush_page_to_ram(page);
- set_pte(pte, pte_mkwrite(page_pte_prot(page, PAGE_COPY)));
+ set_pte(pte, pte_mkwrite(mk_pte(page, PAGE_COPY)));
/* no need for flush_tlb */
return page;
}
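
Here the page_pte_prot() helper gives way to the standard mk_pte(), which builds a pte from a struct page and a protection value; pte_mkwrite() then turns on the write bit. A userspace model of that composition using x86-style bit values; the real mk_pte() takes a struct page and a pgprot_t, while this sketch uses a bare page frame number:

    #include <stdio.h>

    #define PAGE_SHIFT    12
    #define _PAGE_PRESENT 0x001
    #define _PAGE_RW      0x002
    #define _PAGE_DIRTY   0x040

    typedef unsigned long pte_t;

    /* mk_pte(): combine a page frame number with protection bits. */
    static pte_t mk_pte(unsigned long pfn, unsigned long prot)
    {
        return (pfn << PAGE_SHIFT) | prot;
    }

    static pte_t pte_mkwrite(pte_t pte) { return pte | _PAGE_RW; }
    static pte_t pte_mkdirty(pte_t pte) { return pte | _PAGE_DIRTY; }

    int main(void)
    {
        pte_t pte = pte_mkwrite(mk_pte(0x1234, _PAGE_PRESENT));
        printf("pte = 0x%lx\n", pte);
        return 0;
    }
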
@@ -819,7 +819,7 @@
if (pte_val(*page_table) == pte_val(pte)) {
if (PageReserved(old_page))
++vma->vm_mm->rss;
- copy_cow_page(old_page, new_page);
+ copy_cow_page(old_page, new_page, address);
flush_page_to_ram(new_page);
flush_cache_page(vma, address);
set_pte(page_table, pte_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot))));
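
The copy-on-write fault path gains the same address argument. The context lines show the ordering that makes the path safe: the pte is re-checked (the pte_val() comparison) before anything is committed, the private copy is made, caches are flushed, and only then is a writable, dirty pte installed. A userspace model of that re-check-then-install step, with the race against a sibling thread simulated by hand (all values illustrative):

    #include <stdio.h>

    static unsigned long pte;       /* the live page-table entry */

    /* Commit the COW copy only if the pte is still the one we
     * faulted on; otherwise another thread resolved the fault
     * first and our copy must be discarded. */
    static int break_cow(unsigned long seen_pte, unsigned long new_pte)
    {
        if (pte != seen_pte)
            return 0;               /* lost the race */
        /* copy_cow_page() + flush_page_to_ram() would go here */
        pte = new_pte;              /* set_pte() */
        return 1;
    }

    int main(void)
    {
        pte = 0x1001;               /* read-only mapping, as faulted on */
        unsigned long seen = pte;
        pte = 0x2003;               /* a sibling thread got there first */
        printf("installed: %d\n", break_cow(seen, 0x1043));
        return 0;
    }
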
@@ -1051,8 +1051,7 @@
* As this is called only for pages that do not currently exist, we
* do not need to flush old virtual caches or the TLB.
*
- * This is called with the MM semaphore and the kernel lock held.
- * We need to release the kernel lock as soon as possible..
+ * This is called with the MM semaphore held.
*/
static int do_no_page(struct task_struct * tsk, struct vm_area_struct * vma,
unsigned long address, int write_access, pte_t *page_table)
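
The comment rewrite records a locking change, not just a tidy-up: do_no_page() now runs under the caller's MM semaphore alone, where it previously also entered with the kernel lock held and had to drop it as soon as possible. A small pthread model of the resulting contract; mmap_sem and the function names are stand-ins for the kernel's mm->mmap_sem and fault path:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t mmap_sem = PTHREAD_MUTEX_INITIALIZER;

    /* Post-change contract: fault handling runs under the per-mm
     * semaphore only; there is no global kernel lock to shed. */
    static void do_no_page_model(void)
    {
        /* page lookup and mapping work happens here */
    }

    static void fault_path(void)
    {
        pthread_mutex_lock(&mmap_sem);
        do_no_page_model();
        pthread_mutex_unlock(&mmap_sem);
    }

    int main(void)
    {
        fault_path();
        printf("fault handled under mmap_sem only\n");
        return 0;
    }
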
@@ -1069,10 +1068,10 @@
* essentially an early COW detection.
*/
new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK, (vma->vm_flags & VM_SHARED)?0:write_access);
- if (!new_page)
- return 0; /* SIGBUS - but we _really_ should know whether it is OOM or SIGBUS */
- if (new_page == (struct page *)-1)
- return -1; /* OOM */
+ if (new_page == NULL) /* no page was available -- SIGBUS */
+ return 0;
+ if (new_page == NOPAGE_OOM)
+ return -1;
++tsk->maj_flt;
++vma->vm_mm->rss;
/*
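
The final hunk replaces two magic returns from vm_ops->nopage with a documented convention: NULL still means "no page here" and ends in SIGBUS, while the named NOPAGE_OOM constant (the old (struct page *)-1) makes do_no_page() return -1 for out-of-memory. A userspace model of a handler written against that convention and of the dispatch on its result; the handler body and its allocation are illustrative:

    #include <stdio.h>
    #include <stdlib.h>

    struct page { char data[4096]; };

    #define NOPAGE_OOM ((struct page *)-1)

    /* Model nopage handler: NULL -> SIGBUS, NOPAGE_OOM -> OOM. */
    static struct page *example_nopage(unsigned long address, int in_range)
    {
        struct page *page;

        (void)address;
        if (!in_range)
            return NULL;                 /* nothing mapped here: SIGBUS */
        page = calloc(1, sizeof(*page)); /* stand-in for page allocation */
        if (!page)
            return NOPAGE_OOM;           /* allocation failed: OOM */
        return page;
    }

    /* Model of do_no_page()'s dispatch on the return value. */
    static int do_no_page_model(unsigned long address, int in_range)
    {
        struct page *new_page = example_nopage(address, in_range);

        if (new_page == NULL)
            return 0;                    /* caller raises SIGBUS */
        if (new_page == NOPAGE_OOM)
            return -1;                   /* caller handles OOM */
        free(new_page);                  /* real code would map it in */
        return 1;
    }

    int main(void)
    {
        printf("mapped: %d, unmapped: %d\n",
               do_no_page_model(0x1000, 1), do_no_page_model(0x2000, 0));
        return 0;
    }
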