patch-2.4.18 linux/mm/filemap.c
- Lines: 219
- Date: Tue Jan 22 23:04:47 2002
- Orig file: linux.orig/mm/filemap.c
- Orig date: Mon Feb 18 20:18:40 2002
diff -Naur -X /home/marcelo/lib/dontdiff linux.orig/mm/filemap.c linux/mm/filemap.c
@@ -454,41 +454,6 @@
         return page;
 }
 
-/*
- * By the time this is called, the page is locked and
- * we don't have to worry about any races any more.
- *
- * Start the IO..
- */
-static int writeout_one_page(struct page *page)
-{
-        struct buffer_head *bh, *head = page->buffers;
-
-        bh = head;
-        do {
-                if (buffer_locked(bh) || !buffer_dirty(bh) || !buffer_uptodate(bh))
-                        continue;
-
-                bh->b_flushtime = jiffies;
-                ll_rw_block(WRITE, 1, &bh);
-        } while ((bh = bh->b_this_page) != head);
-        return 0;
-}
-
-int waitfor_one_page(struct page *page)
-{
-        int error = 0;
-        struct buffer_head *bh, *head = page->buffers;
-
-        bh = head;
-        do {
-                wait_on_buffer(bh);
-                if (buffer_req(bh) && !buffer_uptodate(bh))
-                        error = -EIO;
-        } while ((bh = bh->b_this_page) != head);
-        return error;
-}
-
 static int do_buffer_fdatasync(struct list_head *head, unsigned long start, unsigned long end, int (*fn)(struct page *))
 {
         struct list_head *curr;
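
The two helpers removed above leave mm/filemap.c, but their interface survives: do_buffer_fdatasync() below still receives one of them through its int (*fn)(struct page *) argument. Both walk the same structure, the singly linked ring of buffer_heads that hangs off page->buffers via b_this_page; note that the continue in writeout_one_page() is only safe because the loop advance sits in the do/while condition. A minimal userspace sketch of that ring walk, with a toy struct standing in for the kernel's buffer_head:

#include <stdio.h>

/* Toy stand-in for the kernel's buffer_head; only the ring link and
 * the dirty flag matter here. */
struct toy_buffer {
        int blocknr;
        int dirty;
        struct toy_buffer *b_this_page;        /* circular link, as in 2.4 */
};

/* One full lap around the ring, starting and ending at the head. */
static void writeout_ring(struct toy_buffer *head)
{
        struct toy_buffer *bh = head;

        do {
                if (!bh->dirty)
                        continue;        /* goes to the while-test, so the
                                          * ring still advances */
                printf("write block %d\n", bh->blocknr);
        } while ((bh = bh->b_this_page) != head);
}

int main(void)
{
        struct toy_buffer b[4];
        int i;

        for (i = 0; i < 4; i++) {
                b[i].blocknr = i;
                b[i].dirty = i & 1;                     /* blocks 1 and 3 dirty */
                b[i].b_this_page = &b[(i + 1) % 4];     /* close the ring */
        }
        writeout_ring(&b[0]);
        return 0;
}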
@@ -582,8 +547,9 @@
  *	@mapping: address space structure to write
  *
  */
-void filemap_fdatasync(struct address_space * mapping)
+int filemap_fdatasync(struct address_space * mapping)
 {
+        int ret = 0;
         int (*writepage)(struct page *) = mapping->a_ops->writepage;
 
         spin_lock(&pagecache_lock);
@@ -603,8 +569,11 @@
                 lock_page(page);
 
                 if (PageDirty(page)) {
+                        int err;
                         ClearPageDirty(page);
-                        writepage(page);
+                        err = writepage(page);
+                        if (err && !ret)
+                                ret = err;
                 } else
                         UnlockPage(page);
 
@@ -612,6 +581,7 @@
                 spin_lock(&pagecache_lock);
         }
         spin_unlock(&pagecache_lock);
+        return ret;
 }
 
 /**
@@ -621,8 +591,10 @@
  *	@mapping: address space structure to wait for
  *
  */
-void filemap_fdatawait(struct address_space * mapping)
+int filemap_fdatawait(struct address_space * mapping)
 {
+        int ret = 0;
+
         spin_lock(&pagecache_lock);
 
         while (!list_empty(&mapping->locked_pages)) {
@@ -638,11 +610,14 @@
                 spin_unlock(&pagecache_lock);
 
                 ___wait_on_page(page);
+                if (PageError(page))
+                        ret = -EIO;
 
                 page_cache_release(page);
                 spin_lock(&pagecache_lock);
         }
         spin_unlock(&pagecache_lock);
+        return ret;
 }
 
 /*
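
The point of the two hunks above is the type change: filemap_fdatasync() and filemap_fdatawait() now return 0 or a negative errno (-EIO when a written page comes back with PageError set) instead of void, so writeback failures can finally reach fsync() and msync() callers instead of being dropped. A userspace sketch of the resulting convention, with hypothetical stage functions standing in for the kernel helpers: the first failure is remembered while later stages still run, the same ret/err pattern msync_interval() adopts further down.

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel stages (filemap_fdatasync(),
 * fsync_inode_data_buffers(), filemap_fdatawait()); each reports
 * 0 or a negative errno. */
static int stage_start_writeout(void) { return 0; }
static int stage_flush_buffers(void)  { return -EIO; }  /* simulated failure */
static int stage_wait_on_pages(void)  { return 0; }

static int sync_all(void)
{
        int ret = 0;
        int err;

        err = stage_start_writeout();
        if (err && !ret)
                ret = err;              /* keep only the FIRST failure */
        err = stage_flush_buffers();
        if (err && !ret)
                ret = err;
        err = stage_wait_on_pages();    /* later stages still run */
        if (err && !ret)
                ret = err;
        return ret;
}

int main(void)
{
        printf("sync_all() = %d\n", sync_all());        /* -5, i.e. -EIO */
        return 0;
}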
@@ -941,7 +916,6 @@
         spin_unlock(&pagecache_lock);
         if (!page) {
                 struct page *newpage = alloc_page(gfp_mask);
-                page = NULL;
                 if (newpage) {
                         spin_lock(&pagecache_lock);
                         page = __find_lock_page_helper(mapping, index, *hash);
@@ -1520,12 +1494,14 @@
                 goto out_free;
 
         /*
-         * Flush to disk exlusively the _data_, metadata must remains
+         * Flush to disk exclusively the _data_, metadata must remain
          * completly asynchronous or performance will go to /dev/null.
          */
-        filemap_fdatasync(mapping);
-        retval = fsync_inode_data_buffers(inode);
-        filemap_fdatawait(mapping);
+        retval = filemap_fdatasync(mapping);
+        if (retval == 0)
+                retval = fsync_inode_data_buffers(inode);
+        if (retval == 0)
+                retval = filemap_fdatawait(mapping);
 
         if (retval < 0)
                 goto out_free;
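
This hunk is on the O_DIRECT setup path: the cached data must be flushed before the direct I/O goes to disk, while metadata stays asynchronous for performance, and the chain now stops at the first failure. The data-versus-metadata split is the same one userspace gets from fdatasync(2) as opposed to fsync(2); a minimal sketch (the file name is illustrative):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int fd = open("data.log", O_WRONLY | O_CREAT | O_APPEND, 0644);

        if (fd < 0) { perror("open"); return 1; }
        if (write(fd, "record\n", 7) != 7) { perror("write"); return 1; }

        /* Force the file DATA to disk; metadata that isn't needed to
         * read the data back (e.g. timestamps) may be left dirty, which
         * is the cheap option the comment above is defending.  fsync()
         * would flush the inode metadata as well. */
        if (fdatasync(fd) < 0) { perror("fdatasync"); return 1; }

        close(fd);
        return 0;
}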
@@ -2142,26 +2118,45 @@
 /*
  * The msync() system call.
  */
+/*
+ * MS_SYNC syncs the entire file - including mappings.
+ *
+ * MS_ASYNC initiates writeout of just the dirty mapped data.
+ * This provides no guarantee of file integrity - things like indirect
+ * blocks may not have started writeout.  MS_ASYNC is primarily useful
+ * where the application knows that it has finished with the data and
+ * wishes to intelligently schedule its own I/O traffic.
+ */
 static int msync_interval(struct vm_area_struct * vma,
         unsigned long start, unsigned long end, int flags)
 {
+        int ret = 0;
         struct file * file = vma->vm_file;
+
         if (file && (vma->vm_flags & VM_SHARED)) {
-                int error;
-                error = filemap_sync(vma, start, end-start, flags);
+                ret = filemap_sync(vma, start, end-start, flags);
 
-                if (!error && (flags & MS_SYNC)) {
+                if (!ret && (flags & (MS_SYNC|MS_ASYNC))) {
                         struct inode * inode = file->f_dentry->d_inode;
+
                         down(&inode->i_sem);
-                        filemap_fdatasync(inode->i_mapping);
-                        if (file->f_op && file->f_op->fsync)
-                                error = file->f_op->fsync(file, file->f_dentry, 1);
-                        filemap_fdatawait(inode->i_mapping);
+                        ret = filemap_fdatasync(inode->i_mapping);
+                        if (flags & MS_SYNC) {
+                                int err;
+
+                                if (file->f_op && file->f_op->fsync) {
+                                        err = file->f_op->fsync(file, file->f_dentry, 1);
+                                        if (err && !ret)
+                                                ret = err;
+                                }
+                                err = filemap_fdatawait(inode->i_mapping);
+                                if (err && !ret)
+                                        ret = err;
+                        }
                         up(&inode->i_sem);
                 }
-                return error;
         }
-        return 0;
+        return ret;
 }
 
 asmlinkage long sys_msync(unsigned long start, size_t len, int flags)
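
After this hunk the msync(2) flags behave as the new comment describes: MS_ASYNC runs filemap_fdatasync() to start writeout and returns without waiting, while MS_SYNC additionally calls the fsync() file operation and filemap_fdatawait(), and the first error is propagated to the caller. From userspace that looks like this (a sketch; the file name and length are illustrative):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        int fd = open("shared.dat", O_RDWR | O_CREAT, 0644);
        char *map;

        if (fd < 0 || ftruncate(fd, 4096) < 0) { perror("setup"); return 1; }

        map = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (map == MAP_FAILED) { perror("mmap"); return 1; }

        memcpy(map, "hello", 5);                /* dirty the mapped page */

        /* Start writeout of the dirty pages and return immediately;
         * no integrity guarantee for e.g. indirect blocks. */
        if (msync(map, 4096, MS_ASYNC) < 0)
                perror("msync(MS_ASYNC)");

        /* Full sync: also runs the fsync() file operation and waits for
         * the pages; a writeback -EIO now surfaces as a -1/EIO return. */
        if (msync(map, 4096, MS_SYNC) < 0)
                perror("msync(MS_SYNC)");

        munmap(map, 4096);
        close(fd);
        return 0;
}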
@@ -3005,7 +3000,7 @@
                 kaddr = kmap(page);
                 status = mapping->a_ops->prepare_write(file, page, offset, offset+bytes);
                 if (status)
-                        goto unlock;
+                        goto sync_failure;
                 page_fault = __copy_from_user(kaddr+offset, buf, bytes);
                 flush_dcache_page(page);
                 status = mapping->a_ops->commit_write(file, page, offset, offset+bytes);
@@ -3030,6 +3025,7 @@
                 if (status < 0)
                         break;
         } while (count);
+done:
         *ppos = pos;
 
         if (cached_page)
@@ -3051,6 +3047,18 @@
 fail_write:
         status = -EFAULT;
         goto unlock;
+
+sync_failure:
+        /*
+         * If blocksize < pagesize, prepare_write() may have instantiated a
+         * few blocks outside i_size.  Trim these off again.
+         */
+        kunmap(page);
+        UnlockPage(page);
+        page_cache_release(page);
+        if (pos + bytes > inode->i_size)
+                vmtruncate(inode, inode->i_size);
+        goto done;
 
 o_direct:
         written = generic_file_direct_IO(WRITE, file, (char *) buf, count, pos);
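
The sync_failure path added above exists because i_size and the set of instantiated blocks are separate bookkeeping: with blocksize < pagesize, a failed prepare_write() can leave blocks allocated beyond end-of-file, and vmtruncate(inode, inode->i_size) discards them without changing the size. Userspace can observe the same size/blocks distinction with stat(2), and ftruncate(2) is the closest analogue of that trim; a sketch (the file name is illustrative):

#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
        struct stat st;
        int fd = open("probe.dat", O_RDWR | O_CREAT | O_TRUNC, 0644);

        if (fd < 0) { perror("open"); return 1; }

        /* One byte of data, yet a whole filesystem block gets
         * instantiated: st_size and st_blocks are tracked separately. */
        if (write(fd, "x", 1) != 1) { perror("write"); return 1; }
        if (fsync(fd) < 0) { perror("fsync"); return 1; }
        if (fstat(fd, &st) < 0) { perror("fstat"); return 1; }
        printf("st_size=%lld bytes, st_blocks=%lld 512-byte units\n",
               (long long)st.st_size, (long long)st.st_blocks);

        /* Truncating to the current size is the userspace spelling of
         * vmtruncate(inode, inode->i_size): the length is unchanged, but
         * in-kernel the pages and blocks beyond it are discarded. */
        if (ftruncate(fd, st.st_size) < 0) { perror("ftruncate"); return 1; }

        close(fd);
        return 0;
}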