patch-2.1.130 linux/mm/vmscan.c
- Lines: 60
- Date: Tue Nov 24 09:40:28 1998
- Orig file: v2.1.129/linux/mm/vmscan.c
- Orig date: Thu Nov 19 09:56:29 1998
diff -u --recursive --new-file v2.1.129/linux/mm/vmscan.c linux/mm/vmscan.c
@@ -10,22 +10,14 @@
* Version: $Id: vmscan.c,v 1.5 1998/02/23 22:14:28 sct Exp $
*/
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
+#include <linux/slab.h>
#include <linux/kernel_stat.h>
-#include <linux/errno.h>
-#include <linux/string.h>
#include <linux/swap.h>
#include <linux/swapctl.h>
#include <linux/smp_lock.h>
-#include <linux/slab.h>
-#include <linux/dcache.h>
-#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/init.h>
-#include <asm/bitops.h>
#include <asm/pgtable.h>
/*
@@ -170,7 +162,7 @@
* copy in memory, so we add it to the swap
* cache. */
if (PageSwapCache(page_map)) {
- free_page_and_swap_cache(page);
+ free_page(page);
return (atomic_read(&page_map->count) == 0);
}
add_to_swap_cache(page_map, entry);
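This hunk (and the two below) replaces free_page_and_swap_cache() with a plain
free_page(): only the caller's reference is dropped, and any swap-cache entry
is deliberately left behind. As a rough sketch of the distinction, reconstructed
from context rather than quoted from the 2.1 tree, the swap-cache-aware variant
does roughly this:

	/* Sketch only, assuming the 2.1-era helpers MAP_NR() and
	 * delete_from_swap_cache(): the swap-cache-aware free detaches
	 * the page from the swap cache before dropping the caller's
	 * reference.  The patched code skips the detach step and leaves
	 * the cleanup to shrink_mmap(). */
	static void free_page_and_swap_cache_sketch(unsigned long addr)
	{
		struct page *page = mem_map + MAP_NR(addr);

		if (PageSwapCache(page))
			delete_from_swap_cache(page);	/* drop the cache's hold */
		free_page(addr);			/* drop the caller's hold */
	}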
@@ -188,7 +180,7 @@
* asynchronously. That's no problem, shrink_mmap() can
* correctly clean up the occasional unshared page
* which gets left behind in the swap cache. */
- free_page_and_swap_cache(page);
+ free_page(page);
return 1; /* we slept: the process may not exist any more */
}
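Because the page was added to the swap cache just above, free_page() here
leaves exactly one reference behind: the swap cache's own. A hypothetical
sketch of the test shrink_mmap() can then apply to reap such an orphaned,
unshared page (the condition is reconstructed from the comment above, not
quoted from shrink_mmap() itself):

	/* Hypothetical: an unshared swap-cache page is down to the swap
	 * cache's single reference, so it can be detached and freed. */
	if (PageSwapCache(page) && atomic_read(&page->count) == 1)
		delete_from_swap_cache(page);	/* releases the last reference */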
@@ -202,7 +194,7 @@
set_pte(page_table, __pte(entry));
flush_tlb_page(vma, address);
swap_duplicate(entry);
- free_page_and_swap_cache(page);
+ free_page(page);
return (atomic_read(&page_map->count) == 0);
}
/*
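In this path each pte mapping the page is rewritten to hold the swap entry,
and swap_duplicate() bumps the entry's use count so the slot stays allocated
for the extra mapper before the page itself is released. Conceptually, and
ignoring the real function's sanity checks, swap_duplicate() amounts to:

	/* Conceptual sketch of swap_duplicate(); field and macro names
	 * follow the 2.1 swap code (swap_info[], swap_map, SWP_TYPE,
	 * SWP_OFFSET), but the real function also validates the entry. */
	static void swap_duplicate_sketch(unsigned long entry)
	{
		struct swap_info_struct *p = &swap_info[SWP_TYPE(entry)];

		p->swap_map[SWP_OFFSET(entry)]++;	/* one more pte uses this slot */
	}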
@@ -218,7 +210,7 @@
flush_cache_page(vma, address);
pte_clear(page_table);
flush_tlb_page(vma, address);
- entry = page_unuse(page_map);
+ entry = (atomic_read(&page_map->count) == 1);
__free_page(page_map);
return entry;
}
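The final hunk drops the page_unuse() helper and tests the reference count
directly: if the count is 1 before __free_page(), this call releases the last
reference and the page really goes back to the allocator. A sketch of the
drop-and-report pattern the patch inlines (a reconstruction, not the removed
helper itself):

	/* Sketch of the inlined logic: returns nonzero when the caller
	 * held the final reference, i.e. the page is now truly free. */
	static inline int drop_and_report(struct page *page)
	{
		int was_last = (atomic_read(&page->count) == 1);

		__free_page(page);	/* drops our reference */
		return was_last;
	}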