patch-2.4.18 linux/fs/buffer.c
Next file: linux/fs/coda/coda_linux.c
Previous file: linux/fs/block_dev.c
Back to the patch index
Back to the overall index
- Lines: 332
- Date: Wed Feb 6 21:23:32 2002
- Orig file: linux.orig/fs/buffer.c
- Orig date: Mon Feb 18 20:18:40 2002
diff -Naur -X /home/marcelo/lib/dontdiff linux.orig/fs/buffer.c linux/fs/buffer.c
@@ -401,9 +401,9 @@
struct file * file;
struct dentry * dentry;
struct inode * inode;
- int err;
+ int ret, err;
- err = -EBADF;
+ ret = -EBADF;
file = fget(fd);
if (!file)
goto out;
@@ -411,21 +411,27 @@
dentry = file->f_dentry;
inode = dentry->d_inode;
- err = -EINVAL;
- if (!file->f_op || !file->f_op->fsync)
+ ret = -EINVAL;
+ if (!file->f_op || !file->f_op->fsync) {
+ /* Why? We can still call filemap_fdatasync */
goto out_putf;
+ }
/* We need to protect against concurrent writers.. */
down(&inode->i_sem);
- filemap_fdatasync(inode->i_mapping);
+ ret = filemap_fdatasync(inode->i_mapping);
err = file->f_op->fsync(file, dentry, 0);
- filemap_fdatawait(inode->i_mapping);
+ if (err && !ret)
+ ret = err;
+ err = filemap_fdatawait(inode->i_mapping);
+ if (err && !ret)
+ ret = err;
up(&inode->i_sem);
out_putf:
fput(file);
out:
- return err;
+ return ret;
}
asmlinkage long sys_fdatasync(unsigned int fd)
@@ -433,9 +439,9 @@
struct file * file;
struct dentry * dentry;
struct inode * inode;
- int err;
+ int ret, err;
- err = -EBADF;
+ ret = -EBADF;
file = fget(fd);
if (!file)
goto out;
@@ -443,20 +449,24 @@
dentry = file->f_dentry;
inode = dentry->d_inode;
- err = -EINVAL;
+ ret = -EINVAL;
if (!file->f_op || !file->f_op->fsync)
goto out_putf;
down(&inode->i_sem);
- filemap_fdatasync(inode->i_mapping);
+ ret = filemap_fdatasync(inode->i_mapping);
err = file->f_op->fsync(file, dentry, 1);
- filemap_fdatawait(inode->i_mapping);
+ if (err && !ret)
+ ret = err;
+ err = filemap_fdatawait(inode->i_mapping);
+ if (err && !ret)
+ ret = err;
up(&inode->i_sem);
out_putf:
fput(file);
out:
- return err;
+ return ret;
}
/* After several hours of tedious analysis, the following hash
@@ -1512,6 +1522,7 @@
int err, i;
unsigned long block;
struct buffer_head *bh, *head;
+ int need_unlock;
if (!PageLocked(page))
BUG();
@@ -1567,8 +1578,34 @@
return 0;
out:
+ /*
+ * ENOSPC, or some other error. We may already have added some
+ * blocks to the file, so we need to write these out to avoid
+ * exposing stale data.
+ */
ClearPageUptodate(page);
- UnlockPage(page);
+ bh = head;
+ need_unlock = 1;
+ /* Recovery: lock and submit the mapped buffers */
+ do {
+ if (buffer_mapped(bh)) {
+ lock_buffer(bh);
+ set_buffer_async_io(bh);
+ need_unlock = 0;
+ }
+ bh = bh->b_this_page;
+ } while (bh != head);
+ do {
+ struct buffer_head *next = bh->b_this_page;
+ if (buffer_mapped(bh)) {
+ set_bit(BH_Uptodate, &bh->b_state);
+ clear_bit(BH_Dirty, &bh->b_state);
+ submit_bh(WRITE, bh);
+ }
+ bh = next;
+ } while (bh != head);
+ if (need_unlock)
+ UnlockPage(page);
return err;
}
@@ -1599,6 +1636,7 @@
continue;
if (block_start >= to)
break;
+ clear_bit(BH_New, &bh->b_state);
if (!buffer_mapped(bh)) {
err = get_block(inode, block, bh, 1);
if (err)
@@ -1633,12 +1671,35 @@
*/
while(wait_bh > wait) {
wait_on_buffer(*--wait_bh);
- err = -EIO;
if (!buffer_uptodate(*wait_bh))
- goto out;
+ return -EIO;
}
return 0;
out:
+ /*
+ * Zero out any newly allocated blocks to avoid exposing stale
+ * data. If BH_New is set, we know that the block was newly
+ * allocated in the above loop.
+ */
+ bh = head;
+ block_start = 0;
+ do {
+ block_end = block_start+blocksize;
+ if (block_end <= from)
+ goto next_bh;
+ if (block_start >= to)
+ break;
+ if (buffer_new(bh)) {
+ if (buffer_uptodate(bh))
+ printk(KERN_ERR "%s: zeroing uptodate buffer!\n", __FUNCTION__);
+ memset(kaddr+block_start, 0, bh->b_size);
+ set_bit(BH_Uptodate, &bh->b_state);
+ mark_buffer_dirty(bh);
+ }
+next_bh:
+ block_start = block_end;
+ bh = bh->b_this_page;
+ } while (bh != head);
return err;
}
@@ -1760,6 +1821,52 @@
return 0;
}
+/* utility function for filesystems that need to do work on expanding
+ * truncates. Uses prepare/commit_write to allow the filesystem to
+ * deal with the hole.
+ */
+int generic_cont_expand(struct inode *inode, loff_t size)
+{
+ struct address_space *mapping = inode->i_mapping;
+ struct page *page;
+ unsigned long index, offset, limit;
+ int err;
+
+ err = -EFBIG;
+ limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
+ if (limit != RLIM_INFINITY && size > (loff_t)limit) {
+ send_sig(SIGXFSZ, current, 0);
+ goto out;
+ }
+ if (size > inode->i_sb->s_maxbytes)
+ goto out;
+
+ offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */
+
+ /* ugh. in prepare/commit_write, if from==to==start of block, we
+ ** skip the prepare. make sure we never send an offset for the start
+ ** of a block
+ */
+ if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
+ offset++;
+ }
+ index = size >> PAGE_CACHE_SHIFT;
+ err = -ENOMEM;
+ page = grab_cache_page(mapping, index);
+ if (!page)
+ goto out;
+ err = mapping->a_ops->prepare_write(NULL, page, offset, offset);
+ if (!err) {
+ err = mapping->a_ops->commit_write(NULL, page, offset, offset);
+ }
+ UnlockPage(page);
+ page_cache_release(page);
+ if (err > 0)
+ err = 0;
+out:
+ return err;
+}
+
/*
* For moronic filesystems that do not allow holes in file.
* We may have to extend the file.
@@ -1989,6 +2096,48 @@
goto done;
}
+/*
+ * Commence writeout of all the buffers against a page. The
+ * page must be locked. Returns zero on success or a negative
+ * errno.
+ */
+int writeout_one_page(struct page *page)
+{
+ struct buffer_head *bh, *head = page->buffers;
+
+ if (!PageLocked(page))
+ BUG();
+ bh = head;
+ do {
+ if (buffer_locked(bh) || !buffer_dirty(bh) || !buffer_uptodate(bh))
+ continue;
+
+ bh->b_flushtime = jiffies;
+ ll_rw_block(WRITE, 1, &bh);
+ } while ((bh = bh->b_this_page) != head);
+ return 0;
+}
+EXPORT_SYMBOL(writeout_one_page);
+
+/*
+ * Wait for completion of I/O of all buffers against a page. The page
+ * must be locked. Returns zero on success or a negative errno.
+ */
+int waitfor_one_page(struct page *page)
+{
+ int error = 0;
+ struct buffer_head *bh, *head = page->buffers;
+
+ bh = head;
+ do {
+ wait_on_buffer(bh);
+ if (buffer_req(bh) && !buffer_uptodate(bh))
+ error = -EIO;
+ } while ((bh = bh->b_this_page) != head);
+ return error;
+}
+EXPORT_SYMBOL(waitfor_one_page);
+
int generic_block_bmap(struct address_space *mapping, long block, get_block_t *get_block)
{
struct buffer_head tmp;
@@ -2003,8 +2152,10 @@
{
int i, nr_blocks, retval;
unsigned long * blocks = iobuf->blocks;
+ int length;
- nr_blocks = iobuf->length / blocksize;
+ length = iobuf->length;
+ nr_blocks = length / blocksize;
/* build the blocklist */
for (i = 0; i < nr_blocks; i++, blocknr++) {
struct buffer_head bh;
@@ -2014,8 +2165,14 @@
bh.b_size = blocksize;
retval = get_block(inode, blocknr, &bh, rw == READ ? 0 : 1);
- if (retval)
- goto out;
+ if (retval) {
+ if (!i)
+ /* report error to userspace */
+ goto out;
+ else
+ /* do short I/O until 'i' */
+ break;
+ }
if (rw == READ) {
if (buffer_new(&bh))
@@ -2034,9 +2191,13 @@
blocks[i] = bh.b_blocknr;
}
+ /* patch length to handle short I/O */
+ iobuf->length = i * blocksize;
retval = brw_kiovec(rw, 1, &iobuf, inode->i_dev, iobuf->blocks, blocksize);
-
+ /* restore orig length */
+ iobuf->length = length;
out:
+
return retval;
}
@@ -2562,6 +2723,9 @@
printk("Buffer memory: %6dkB\n",
atomic_read(&buffermem_pages) << (PAGE_SHIFT-10));
+
+ printk("Cache memory: %6dkB\n",
+ (atomic_read(&page_cache_size)- atomic_read(&buffermem_pages)) << (PAGE_SHIFT-10));
#ifdef CONFIG_SMP /* trylock does nothing on UP and so we could deadlock */
if (!spin_trylock(&lru_list_lock))
FUNET's LINUX-ADM group, linux-adm@nic.funet.fi
TCL-scripts by Sam Shen (who was at: slshen@lbl.gov)