patch-2.3.24 linux/mm/filemap.c
- Lines: 331
- Date: Tue Oct 26 11:12:02 1999
- Orig file: v2.3.23/linux/mm/filemap.c
- Orig date: Fri Oct 22 13:21:55 1999
diff -u --recursive --new-file v2.3.23/linux/mm/filemap.c linux/mm/filemap.c
@@ -89,7 +89,7 @@
spin_lock(&pagecache_lock);
remove_page_from_inode_queue(page);
remove_page_from_hash_queue(page);
- page->inode = NULL;
+ page->mapping = NULL;
spin_unlock(&pagecache_lock);
}
@@ -98,8 +98,7 @@
struct list_head *head, *curr;
struct page * page;
- head = &inode->i_pages;
-repeat:
+ head = &inode->i_data.pages;
spin_lock(&pagecache_lock);
curr = head->next;
@@ -115,7 +114,7 @@
remove_page_from_inode_queue(page);
remove_page_from_hash_queue(page);
- page->inode = NULL;
+ page->mapping = NULL;
page_cache_release(page);
}
spin_unlock(&pagecache_lock);
@@ -133,7 +132,7 @@
int partial = 0;
repeat:
- head = &inode->i_pages;
+ head = &inode->i_data.pages;
spin_lock(&pagecache_lock);
curr = head->next;
while (curr != head) {
@@ -273,7 +272,7 @@
if (!try_to_free_buffers(page))
goto unlock_continue;
/* page was locked, inode can't go away under us */
- if (!page->inode) {
+ if (!page->mapping) {
atomic_dec(&buffermem_pages);
goto made_buffer_progress;
}
@@ -299,14 +298,13 @@
}
/* is it a page-cache page? */
- if (page->inode)
- {
+ if (page->mapping) {
dispose = &old;
if (!pgcache_under_min())
{
remove_page_from_inode_queue(page);
remove_page_from_hash_queue(page);
- page->inode = NULL;
+ page->mapping = NULL;
spin_unlock(&pagecache_lock);
goto made_inode_progress;
}
@@ -358,7 +356,7 @@
return ret;
}
-static inline struct page * __find_page_nolock(struct inode * inode, unsigned long offset, struct page *page)
+static inline struct page * __find_page_nolock(struct address_space *mapping, unsigned long offset, struct page *page)
{
goto inside;
@@ -367,7 +365,7 @@
inside:
if (!page)
goto not_found;
- if (page->inode != inode)
+ if (page->mapping != mapping)
continue;
if (page->offset == offset)
break;
@@ -418,7 +416,7 @@
struct page *page;
int retval = 0;
- head = &inode->i_pages;
+ head = &inode->i_data.pages;
start &= PAGE_MASK;
spin_lock(&pagecache_lock);
@@ -469,7 +467,7 @@
* owned by us, referenced, but not uptodate and with no errors.
*/
static inline void __add_to_page_cache(struct page * page,
- struct inode * inode, unsigned long offset,
+ struct address_space *mapping, unsigned long offset,
struct page **hash)
{
struct page *alias;
@@ -477,37 +475,36 @@
flags = page->flags & ~((1 << PG_uptodate) | (1 << PG_error) | (1 << PG_referenced));
page->flags = flags | (1 << PG_locked);
- page->owner = current; /* REMOVEME */
get_page(page);
page->offset = offset;
- add_page_to_inode_queue(inode, page);
+ add_page_to_inode_queue(mapping, page);
__add_page_to_hash_queue(page, hash);
lru_cache_add(page);
- alias = __find_page_nolock(inode, offset, *hash);
+ alias = __find_page_nolock(mapping, offset, *hash);
if (alias != page)
BUG();
}
-void add_to_page_cache(struct page * page, struct inode * inode, unsigned long offset)
+void add_to_page_cache(struct page * page, struct address_space * mapping, unsigned long offset)
{
spin_lock(&pagecache_lock);
- __add_to_page_cache(page, inode, offset, page_hash(inode, offset));
+ __add_to_page_cache(page, mapping, offset, page_hash(mapping, offset));
spin_unlock(&pagecache_lock);
}
int add_to_page_cache_unique(struct page * page,
- struct inode * inode, unsigned long offset,
+ struct address_space *mapping, unsigned long offset,
struct page **hash)
{
int err;
struct page *alias;
spin_lock(&pagecache_lock);
- alias = __find_page_nolock(inode, offset, *hash);
+ alias = __find_page_nolock(mapping, offset, *hash);
err = 1;
if (!alias) {
- __add_to_page_cache(page,inode,offset,hash);
+ __add_to_page_cache(page,mapping,offset,hash);
err = 0;
}
@@ -522,11 +519,11 @@
static inline void page_cache_read(struct file * file, unsigned long offset)
{
struct inode *inode = file->f_dentry->d_inode;
- struct page **hash = page_hash(inode, offset);
+ struct page **hash = page_hash(&inode->i_data, offset);
struct page *page;
spin_lock(&pagecache_lock);
- page = __find_page_nolock(inode, offset, *hash);
+ page = __find_page_nolock(&inode->i_data, offset, *hash);
spin_unlock(&pagecache_lock);
if (page)
return;
@@ -535,7 +532,7 @@
if (!page)
return;
- if (!add_to_page_cache_unique(page, inode, offset, hash)) {
+ if (!add_to_page_cache_unique(page, &inode->i_data, offset, hash)) {
inode->i_op->readpage(file, page);
page_cache_release(page);
return;
@@ -605,7 +602,7 @@
* a rather lightweight function, finding and getting a reference to a
* hashed page atomically, waiting for it if it's locked.
*/
-struct page * __find_get_page (struct inode * inode,
+struct page * __find_get_page (struct address_space *mapping,
unsigned long offset, struct page **hash)
{
struct page *page;
@@ -616,7 +613,7 @@
*/
repeat:
spin_lock(&pagecache_lock);
- page = __find_page_nolock(inode, offset, *hash);
+ page = __find_page_nolock(mapping, offset, *hash);
if (page)
get_page(page);
spin_unlock(&pagecache_lock);
@@ -655,7 +652,7 @@
/*
* Get the lock to a page atomically.
*/
-struct page * __find_lock_page (struct inode * inode,
+struct page * __find_lock_page (struct address_space *mapping,
unsigned long offset, struct page **hash)
{
struct page *page;
@@ -666,7 +663,7 @@
*/
repeat:
spin_lock(&pagecache_lock);
- page = __find_page_nolock(inode, offset, *hash);
+ page = __find_page_nolock(mapping, offset, *hash);
if (page)
get_page(page);
spin_unlock(&pagecache_lock);
@@ -1005,10 +1002,10 @@
/*
* Try to find the data in the page cache..
*/
- hash = page_hash(inode, pos & PAGE_CACHE_MASK);
+ hash = page_hash(&inode->i_data, pos & PAGE_CACHE_MASK);
spin_lock(&pagecache_lock);
- page = __find_page_nolock(inode, pos & PAGE_CACHE_MASK, *hash);
+ page = __find_page_nolock(&inode->i_data, pos & PAGE_CACHE_MASK, *hash);
if (!page)
goto no_cached_page;
found_page:
@@ -1104,7 +1101,7 @@
* dropped the page cache lock. Check for that.
*/
spin_lock(&pagecache_lock);
- page = __find_page_nolock(inode, pos & PAGE_CACHE_MASK, *hash);
+ page = __find_page_nolock(&inode->i_data, pos & PAGE_CACHE_MASK, *hash);
if (page)
goto found_page;
}
@@ -1113,7 +1110,7 @@
* Ok, add the new page to the hash-queues...
*/
page = cached_page;
- __add_to_page_cache(page, inode, pos & PAGE_CACHE_MASK, hash);
+ __add_to_page_cache(page, &inode->i_data, pos & PAGE_CACHE_MASK, hash);
spin_unlock(&pagecache_lock);
cached_page = NULL;
@@ -1316,9 +1313,9 @@
/*
* Do we have something in the page cache already?
*/
- hash = page_hash(inode, offset);
+ hash = page_hash(&inode->i_data, offset);
retry_find:
- page = __find_get_page(inode, offset, hash);
+ page = __find_get_page(&inode->i_data, offset, hash);
if (!page)
goto no_cached_page;
@@ -1441,27 +1438,25 @@
return retval;
}
-static int filemap_write_page(struct vm_area_struct * vma,
+static int filemap_write_page(struct file *file,
unsigned long offset,
struct page * page,
int wait)
{
int result;
- struct file * file;
struct dentry * dentry;
struct inode * inode;
- file = vma->vm_file;
dentry = file->f_dentry;
inode = dentry->d_inode;
/*
* If a task terminates while we're swapping the page, the vma and
- * and file could be released ... increment the count to be safe.
+ * and file could be released: try_to_swap_out has done a get_file.
+ * vma/file is guaranteed to exist in the unmap/sync cases because
+ * mmap_sem is held.
*/
- get_file(file);
result = do_write_page(inode, file, page, offset);
- fput(file);
return result;
}
@@ -1472,9 +1467,9 @@
* at the same time..
*/
extern void wakeup_bdflush(int);
-int filemap_swapout(struct vm_area_struct * vma, struct page * page)
+int filemap_swapout(struct page * page, struct file * file)
{
- int retval = filemap_write_page(vma, page->offset, page, 0);
+ int retval = filemap_write_page(file, page->offset, page, 0);
wakeup_bdflush(0);
return retval;
}
@@ -1515,7 +1510,7 @@
}
if (PageHighMem(page))
BUG();
- error = filemap_write_page(vma, address - vma->vm_start + vma->vm_offset, page, 1);
+ error = filemap_write_page(vma->vm_file, address - vma->vm_start + vma->vm_offset, page, 1);
page_cache_free(page);
return error;
}
@@ -1821,9 +1816,9 @@
if (bytes > count)
bytes = count;
- hash = page_hash(inode, pgpos);
+ hash = page_hash(&inode->i_data, pgpos);
repeat_find:
- page = __find_lock_page(inode, pgpos, hash);
+ page = __find_lock_page(&inode->i_data, pgpos, hash);
if (!page) {
if (!cached_page) {
cached_page = page_cache_alloc();
@@ -1833,7 +1828,7 @@
break;
}
page = cached_page;
- if (add_to_page_cache_unique(page,inode,pgpos,hash))
+ if (add_to_page_cache_unique(page,&inode->i_data,pgpos,hash))
goto repeat_find;
cached_page = NULL;
@@ -1842,10 +1837,6 @@
/* We have exclusive IO access to the page.. */
if (!PageLocked(page)) {
PAGE_BUG(page);
- } else {
- if (page->owner != current) {
- PAGE_BUG(page);
- }
}
status = write_one_page(file, page, offset, bytes, buf);
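
The visible theme of the hunks above is that page-cache entries are now keyed by a per-inode struct address_space (reached as &inode->i_data) instead of by the struct inode itself, so page->inode becomes page->mapping and the hash/lookup helpers take a mapping argument. The sketch below is illustrative only: the simplified structures and the find_page() helper are stand-ins for the real kernel types and for __find_page_nolock(), not the actual 2.3.24 code.

	/*
	 * Illustrative sketch, not kernel code: a simplified model of the
	 * change from inode-keyed to address_space-keyed page-cache lookups.
	 */
	#include <stddef.h>
	#include <stdio.h>

	struct page;

	struct address_space {
		struct page *pages;		/* simplified: a plain linked list */
	};

	struct inode {
		struct address_space i_data;	/* the mapping lives inside the inode */
	};

	struct page {
		struct address_space *mapping;	/* was: struct inode *inode */
		unsigned long offset;
		struct page *next;
	};

	/* hypothetical helper mirroring the new __find_page_nolock() shape:
	 * match on page->mapping and page->offset, not page->inode */
	static struct page *find_page(struct address_space *mapping,
				      unsigned long offset)
	{
		struct page *p;

		for (p = mapping->pages; p; p = p->next)
			if (p->mapping == mapping && p->offset == offset)
				return p;
		return NULL;
	}

	int main(void)
	{
		struct inode ino = { { NULL } };
		struct page pg = { &ino.i_data, 4096, NULL };

		ino.i_data.pages = &pg;

		/* callers that used to pass "inode" now pass "&inode->i_data" */
		printf("hit:  %p\n", (void *)find_page(&ino.i_data, 4096));
		printf("miss: %p\n", (void *)find_page(&ino.i_data, 8192));
		return 0;
	}

The filemap_swapout()/filemap_write_page() hunks follow the same idea of decoupling from surrounding objects: the write path takes the struct file directly rather than digging it out of the vma, and drops its own get_file()/fput() pair because, per the updated comment, the caller already holds the reference.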