patch-2.3.25 linux/fs/buffer.c
- Lines: 138
- Date: Mon Nov 1 11:11:48 1999
- Orig file: v2.3.24/linux/fs/buffer.c
- Orig date: Wed Oct 27 16:34:12 1999
diff -u --recursive --new-file v2.3.24/linux/fs/buffer.c linux/fs/buffer.c
@@ -58,7 +58,7 @@
6};
#define BUFSIZE_INDEX(X) ((int) buffersize_index[(X)>>9])
-#define MAX_BUF_PER_PAGE (PAGE_SIZE / 512)
+#define MAX_BUF_PER_PAGE (PAGE_CACHE_SIZE / 512)
#define NR_RESERVED (2*MAX_BUF_PER_PAGE)
#define MAX_UNUSED_BUFFERS NR_RESERVED+20 /* don't ever have more than this
number of unused buffer heads */
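
The hunk above bounds the number of buffer heads on a page by the page-cache
page size rather than the hardware page size. A stand-alone sketch of that
arithmetic, assuming a PAGE_CACHE_SIZE of 4096 bytes (the i386 value) and
512 bytes as the smallest supported block size:

/* Sketch only: PAGE_CACHE_SIZE is an assumed value, not taken from kernel headers. */
#include <stdio.h>

#define PAGE_CACHE_SIZE  4096UL                    /* assumed i386 value   */
#define MAX_BUF_PER_PAGE (PAGE_CACHE_SIZE / 512)   /* 512 = smallest block */

int main(void)
{
	unsigned long blocksize;

	/* A page-cache page carries PAGE_CACHE_SIZE / blocksize buffer heads,
	 * never more than MAX_BUF_PER_PAGE (the 512-byte worst case). */
	for (blocksize = 512; blocksize <= PAGE_CACHE_SIZE; blocksize <<= 1)
		printf("blocksize %4lu -> %2lu buffers (max %lu)\n",
		       blocksize, PAGE_CACHE_SIZE / blocksize,
		       (unsigned long)MAX_BUF_PER_PAGE);
	return 0;
}
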
@@ -823,7 +823,7 @@
unsigned long dirty, tot, hard_dirty_limit, soft_dirty_limit;
dirty = size_buffers_type[BUF_DIRTY] >> PAGE_SHIFT;
- tot = nr_lru_pages + nr_free_pages + nr_free_highpages;
+ tot = nr_lru_pages + nr_free_pages - nr_free_highpages;
hard_dirty_limit = tot * bdf_prm.b_un.nfract / 100;
soft_dirty_limit = hard_dirty_limit >> 1;
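
The hunk above subtracts the free highmem pages from the total that the
bdflush dirty thresholds are computed against, presumably because dirty
buffer-cache pages must live in low memory. A small user-space sketch of the
threshold arithmetic; the page counts and the 40% nfract value are invented
for illustration:

#include <stdio.h>

int main(void)
{
	unsigned long nr_lru_pages = 20000, nr_free_pages = 5000;
	unsigned long nr_free_highpages = 3000;  /* excluded from the budget */
	unsigned long nfract = 40;               /* assumed bdflush fraction */

	/* Highmem no longer inflates the total, so the dirty limits shrink
	 * to what low memory can actually hold. */
	unsigned long tot = nr_lru_pages + nr_free_pages - nr_free_highpages;
	unsigned long hard_dirty_limit = tot * nfract / 100;
	unsigned long soft_dirty_limit = hard_dirty_limit >> 1;

	printf("tot=%lu hard=%lu soft=%lu\n",
	       tot, hard_dirty_limit, soft_dirty_limit);
	return 0;
}
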
@@ -1293,13 +1293,17 @@
static void unmap_underlying_metadata(struct buffer_head * bh)
{
#if 0
- bh = get_hash_table(bh->b_dev, bh->b_blocknr, bh->b_size);
- if (bh) {
- unmap_buffer(bh);
- /* Here we could run brelse or bforget. We use
- bforget because it will try to put the buffer
- in the freelist. */
- __bforget(bh);
+ if (buffer_new(bh)) {
+ struct buffer_head *old_bh;
+
+ old_bh = get_hash_table(bh->b_dev, bh->b_blocknr, bh->b_size);
+ if (old_bh) {
+ unmap_buffer(old_bh);
+ /* Here we could run brelse or bforget. We use
+ bforget because it will try to put the buffer
+ in the freelist. */
+ __bforget(old_bh);
+ }
}
#endif
}
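
The rewritten (still disabled by #if 0) unmap_underlying_metadata() only looks
for an aliased buffer when the block is newly allocated, and keeps the lookup
result in a separate old_bh so the caller's bh is no longer overwritten. A
minimal user-space sketch of that pointer discipline; struct buf and
lookup_alias() are hypothetical stand-ins for struct buffer_head and
get_hash_table():

#include <stdio.h>
#include <stddef.h>

struct buf { int blocknr; int is_new; };

/* Pretend hash lookup: block 42 has a stale cached alias. */
static struct buf stale = { 42, 0 };
static struct buf *lookup_alias(int blocknr)
{
	return blocknr == 42 ? &stale : NULL;
}

static void unmap_underlying_metadata(struct buf *bh)
{
	/* Only a freshly allocated block can shadow old metadata, and the
	 * caller's bh must survive the lookup. */
	if (bh->is_new) {
		struct buf *old_bh = lookup_alias(bh->blocknr);
		if (old_bh)
			printf("dropping stale alias for block %d\n",
			       old_bh->blocknr);
	}
}

int main(void)
{
	struct buf bh = { 42, 1 };
	unmap_underlying_metadata(&bh);
	printf("caller still holds block %d\n", bh.blocknr);
	return 0;
}
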
@@ -1313,7 +1317,7 @@
struct dentry *dentry = file->f_dentry;
struct inode *inode = dentry->d_inode;
int err, i;
- unsigned long block, offset;
+ unsigned long block;
struct buffer_head *bh, *head;
if (!PageLocked(page))
@@ -1323,12 +1327,10 @@
create_empty_buffers(page, inode, inode->i_sb->s_blocksize);
head = page->buffers;
- offset = page->offset;
- block = offset >> inode->i_sb->s_blocksize_bits;
-
- // FIXME: currently we assume page alignment.
- if (offset & (PAGE_SIZE-1))
- BUG();
+ /* The page cache is now PAGE_CACHE_SIZE aligned, period. We handle old a.out
+ * and others via unaligned private mappings.
+ */
+ block = page->index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
bh = head;
i = 0;
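
This hunk, like the ones that follow, derives the starting block number from
the page-cache index instead of the byte offset page->offset used to hold. A
small sketch of the arithmetic, assuming a PAGE_CACHE_SHIFT of 12 (4096-byte
pages) and a 1024-byte blocksize; for page-aligned offsets the new
index << (PAGE_CACHE_SHIFT - bbits) form matches the old offset >> bbits:

#include <stdio.h>

#define PAGE_CACHE_SHIFT 12                        /* assumed 4096-byte pages */
#define PAGE_CACHE_SIZE  (1UL << PAGE_CACHE_SHIFT)

int main(void)
{
	unsigned long bbits = 10;                  /* 1024-byte blocks */
	unsigned long index;

	for (index = 0; index < 4; index++) {
		unsigned long old_offset = index << PAGE_CACHE_SHIFT; /* what page->offset held */
		unsigned long old_block  = old_offset >> bbits;
		unsigned long new_block  = index << (PAGE_CACHE_SHIFT - bbits);
		unsigned long blocks     = PAGE_CACHE_SIZE >> bbits;

		/* The two forms agree exactly for aligned pages. */
		printf("index %lu: old block %lu, new block %lu, %lu blocks/page\n",
		       index, old_block, new_block, blocks);
	}
	return 0;
}
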
@@ -1389,8 +1391,8 @@
head = page->buffers;
bbits = inode->i_sb->s_blocksize_bits;
- block = page->offset >> bbits;
- blocks = PAGE_SIZE >> bbits;
+ block = page->index << (PAGE_CACHE_SHIFT - bbits);
+ blocks = PAGE_CACHE_SIZE >> bbits;
start_block = offset >> bbits;
end_block = (offset + bytes - 1) >> bbits;
start_offset = offset & (blocksize - 1);
@@ -1409,9 +1411,6 @@
BUG();
if (end_block < 0 || end_block >= blocks)
BUG();
- // FIXME: currently we assume page alignment.
- if (page->offset & (PAGE_SIZE-1))
- BUG();
i = 0;
bh = head;
@@ -1537,12 +1536,12 @@
unsigned long data_offset = offset;
int need_balance_dirty;
- offset = inode->i_size - page->offset;
- if (page->offset>inode->i_size)
+ offset = inode->i_size - (page->index << PAGE_CACHE_SHIFT);
+ if (page->index > (inode->i_size >> PAGE_CACHE_SHIFT))
offset = 0;
else if (offset >= data_offset)
offset = data_offset;
- bytes += data_offset-offset;
+ bytes += data_offset - offset;
target_buf = (char *)page_address(page) + offset;
target_data = (char *)page_address(page) + data_offset;
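
The hunk above recomputes the current end of file inside the page from
page->index. A sketch of that clamping arithmetic with invented sizes,
assuming a PAGE_CACHE_SHIFT of 12: the write start is pulled back to the old
EOF within the page and the byte count grows so the written range covers the
gap between the old EOF and the new data:

#include <stdio.h>

#define PAGE_CACHE_SHIFT 12   /* assumed 4096-byte page-cache pages */

int main(void)
{
	unsigned long i_size = 10000;      /* current file size in bytes   */
	unsigned long index  = 2;          /* page covers bytes 8192-12287 */
	unsigned long data_offset = 2000;  /* where the new data starts    */
	unsigned long bytes = 200;         /* length of the new data       */

	/* Start at EOF inside this page (or at data_offset if EOF already
	 * passed it), so the range written out also spans the bytes between
	 * the old EOF and the new data. */
	unsigned long offset = i_size - (index << PAGE_CACHE_SHIFT);
	if (index > (i_size >> PAGE_CACHE_SHIFT))
		offset = 0;                /* page lies wholly beyond EOF  */
	else if (offset >= data_offset)
		offset = data_offset;
	bytes += data_offset - offset;

	printf("offset=%lu bytes=%lu\n", offset, bytes); /* offset=1808 bytes=392 */
	return 0;
}
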
@@ -1556,8 +1555,8 @@
head = page->buffers;
bbits = inode->i_sb->s_blocksize_bits;
- block = page->offset >> bbits;
- blocks = PAGE_SIZE >> bbits;
+ block = page->index << (PAGE_CACHE_SHIFT - bbits);
+ blocks = PAGE_CACHE_SIZE >> bbits;
start_block = offset >> bbits;
end_block = (offset + bytes - 1) >> bbits;
start_offset = offset & (blocksize - 1);
@@ -1576,9 +1575,6 @@
BUG();
if (end_block < 0 || end_block >= blocks)
BUG();
- // FIXME: currently we assume page alignment.
- if (page->offset & (PAGE_SIZE-1))
- BUG();
i = 0;
bh = head;
@@ -2015,8 +2011,8 @@
create_empty_buffers(page, inode, blocksize);
head = page->buffers;
- blocks = PAGE_SIZE >> inode->i_sb->s_blocksize_bits;
- iblock = page->offset >> inode->i_sb->s_blocksize_bits;
+ blocks = PAGE_CACHE_SIZE >> inode->i_sb->s_blocksize_bits;
+ iblock = page->index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
bh = head;
nr = 0;