patch-2.3.25 linux/mm/page_io.c
- Lines: 38
- Date: Fri Oct 29 16:45:32 1999
- Orig file: v2.3.24/linux/mm/page_io.c
- Orig date: Wed Oct 27 16:34:12 1999
diff -u --recursive --new-file v2.3.24/linux/mm/page_io.c linux/mm/page_io.c
@@ -33,7 +33,7 @@
  * that shared pages stay shared while being swapped.
  */
 
-static int rw_swap_page_base(int rw, pte_t entry, struct page *page, int wait)
+static int rw_swap_page_base(int rw, swp_entry_t entry, struct page *page, int wait)
 {
 	unsigned long type, offset;
 	struct swap_info_struct * p;
@@ -59,7 +59,7 @@
 		return 0;
 	}
 	if (p->swap_map && !p->swap_map[offset]) {
-		pte_ERROR(entry);
+		printk("VM: Bad swap entry %08lx\n", entry.val);
 		return 0;
 	}
 	if (!(p->flags & SWP_USED)) {
@@ -130,7 +130,9 @@
  */
 void rw_swap_page(int rw, struct page *page, int wait)
 {
-	pte_t entry = get_pagecache_pte(page);
+	swp_entry_t entry;
+
+	entry.val = page->index;
 
 	if (!PageLocked(page))
 		PAGE_BUG(page);
@@ -147,7 +149,7 @@
  * Therefore we can't use it. Later when we can remove the need for the
  * lock map and we can reduce the number of functions exported.
  */
-void rw_swap_page_nolock(int rw, pte_t entry, char *buf, int wait)
+void rw_swap_page_nolock(int rw, swp_entry_t entry, char *buf, int wait)
 {
 	struct page *page = mem_map + MAP_NR(buf);
 
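The hunks above are part of the 2.3.25 conversion of the swap code from overloading pte_t to a dedicated swp_entry_t type: a swap entry is now a plain unsigned long wrapped in a struct, and for a page in the swap cache that value is kept in page->index, which is why rw_swap_page() can recover it with a bare entry.val = page->index. The sketch below is illustration only, not a quote of the kernel source; the real SWP_TYPE/SWP_OFFSET/SWP_ENTRY macros are defined per architecture in <asm/pgtable.h>, and the i386-style bit layout used here is an assumption.

#include <stdio.h>

/*
 * Minimal sketch of the swp_entry_t representation this patch moves to.
 * A swap entry is just an unsigned long inside a struct, so it fits in
 * page->index for swap-cache pages.  The bit layout of SWP_TYPE/SWP_OFFSET
 * is architecture-defined in the real kernel; the i386-style encoding
 * below is assumed here purely for illustration.
 */
typedef struct {
	unsigned long val;
} swp_entry_t;

#define SWP_TYPE(x)		(((x).val >> 1) & 0x3f)	/* which swap area */
#define SWP_OFFSET(x)		((x).val >> 8)		/* slot within that area */
#define SWP_ENTRY(type, offset)	((swp_entry_t) { ((type) << 1) | ((offset) << 8) })

int main(void)
{
	/* Pack swap area 2, slot 1234 into a single word... */
	swp_entry_t entry = SWP_ENTRY(2, 1234);

	/* ...and split it again, much as rw_swap_page_base() does with type/offset. */
	printf("val=%08lx type=%lu offset=%lu\n",
	       entry.val, SWP_TYPE(entry), SWP_OFFSET(entry));
	return 0;
}

Splitting the word into a swap-area index (type) and a page slot (offset) is what rw_swap_page_base() does before it looks up the swap_info_struct for that area and checks p->swap_map[offset], as seen in the first two hunks.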