patch-2.2.7 linux/fs/buffer.c
- Lines: 107
- Date: Tue Apr 27 14:34:41 1999
- Orig file: v2.2.6/linux/fs/buffer.c
- Orig date: Mon Mar 29 11:09:11 1999
diff -u --recursive --new-file v2.2.6/linux/fs/buffer.c linux/fs/buffer.c
@@ -486,33 +486,6 @@
remove_from_lru_list(bh);
}
-static inline void put_last_lru(struct buffer_head * bh)
-{
- if (bh) {
- struct buffer_head **bhp = &lru_list[bh->b_list];
-
- if (bh == *bhp) {
- *bhp = bh->b_next_free;
- return;
- }
-
- if(bh->b_dev == B_FREE)
- panic("Wrong block for lru list");
-
- /* Add to back of free list. */
- remove_from_lru_list(bh);
- if(!*bhp) {
- *bhp = bh;
- (*bhp)->b_prev_free = bh;
- }
-
- bh->b_next_free = *bhp;
- bh->b_prev_free = (*bhp)->b_prev_free;
- (*bhp)->b_prev_free->b_next_free = bh;
- (*bhp)->b_prev_free = bh;
- }
-}
-
static inline void put_last_free(struct buffer_head * bh)
{
if (bh) {
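
The deleted put_last_lru() was a tail insert on a circular, doubly linked list: lru_list[] points at the head, and since the head's b_prev_free is the tail, "add to back" means either rotating the head (when bh is already first) or unlinking bh and splicing it back in just before the head. Restated as a minimal, compilable sketch with hypothetical names (node, move_to_tail), assuming the node is already linked into a non-empty circular list:

struct node { struct node *next, *prev; };

/* Move n to the tail of the circular list at *head.
 * Assumes n is already on the (non-empty) list. */
static void move_to_tail(struct node **head, struct node *n)
{
	if (n == *head) {
		*head = n->next;	/* rotating the head makes the
					   old head the new tail */
		return;
	}
	n->prev->next = n->next;	/* unlink n */
	n->next->prev = n->prev;
	n->next = *head;		/* splice n in just before *head */
	n->prev = (*head)->prev;
	(*head)->prev->next = n;
	(*head)->prev = n;
}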
@@ -726,8 +699,6 @@
bh = get_hash_table(dev, block, size);
if (bh) {
if (!buffer_dirty(bh)) {
- if (buffer_uptodate(bh))
- put_last_lru(bh);
bh->b_flushtime = 0;
}
return bh;
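
With that call gone, an up-to-date buffer found in the hash is no longer bumped to the back of its LRU list on every lookup. The hash-hit path of getblk() reduces to the excerpt below (a sketch in its 2.2 surroundings; that get_hash_table() also takes the b_count reference is my assumption here):

	bh = get_hash_table(dev, block, size);
	if (bh) {
		if (!buffer_dirty(bh))
			bh->b_flushtime = 0;	/* clean buffer: clear any
						   writeback deadline */
		return bh;
	}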
@@ -833,6 +804,7 @@
/* If dirty, mark the time this buffer should be written back. */
set_writetime(buf, 0);
refile_buffer(buf);
+ touch_buffer(buf);
if (buf->b_count) {
buf->b_count--;
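
The reference is now recorded when the buffer is released rather than when it is looked up, so a buffer counts as "recently used" once per get/release cycle instead of on every hash hit. A sketch of the resulting release function, reconstructed from this hunk; the name __brelse and the printk tail are assumptions based on the usual 2.2 shape of the function:

void __brelse(struct buffer_head * buf)
{
	/* If dirty, mark the time this buffer should be written back. */
	set_writetime(buf, 0);
	refile_buffer(buf);
	touch_buffer(buf);	/* new: mark the buffer referenced on release */

	if (buf->b_count) {
		buf->b_count--;
		return;
	}
	printk("VFS: brelse: Trying to free free buffer\n");
}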
@@ -854,6 +826,7 @@
return;
}
buf->b_count = 0;
+ buf->b_state = 0;
remove_from_queues(buf);
put_last_free(buf);
}
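
Zeroing b_state alongside b_count ensures a forgotten buffer reaches the free list with no stale uptodate or dirty bits, so a later getblk() that reuses the head cannot hand out old contents as valid. A sketch of the post-patch function; the guard at the top is an assumption from the surrounding 2.2 code:

void __bforget(struct buffer_head * buf)
{
	/* assumed guard: fall back to a normal release while the
	 * buffer is still shared or locked for I/O */
	if (buf->b_count != 1 || buffer_locked(buf)) {
		__brelse(buf);
		return;
	}
	buf->b_count = 0;
	buf->b_state = 0;	/* new: drop all state bits */
	remove_from_queues(buf);
	put_last_free(buf);
}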
@@ -867,7 +840,6 @@
struct buffer_head * bh;
bh = getblk(dev, block, size);
- touch_buffer(bh);
if (buffer_uptodate(bh))
return bh;
ll_rw_block(READ, 1, &bh);
@@ -904,7 +876,6 @@
bh = getblk(dev, block, bufsize);
index = BUFSIZE_INDEX(bh->b_size);
- touch_buffer(bh);
if (buffer_uptodate(bh))
return(bh);
else ll_rw_block(READ, 1, &bh);
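
Both bread() and breada() lose their up-front touch_buffer() call; brelse() now does the touching. The post-patch bread() then reads as follows (a sketch; the wait_on_buffer()/error tail is assumed from the standard 2.2 version):

struct buffer_head * bread(kdev_t dev, int block, int size)
{
	struct buffer_head * bh;

	bh = getblk(dev, block, size);
	if (buffer_uptodate(bh))
		return bh;		/* already valid, no I/O needed */
	ll_rw_block(READ, 1, &bh);	/* submit the read ... */
	wait_on_buffer(bh);		/* ... and wait for completion */
	if (buffer_uptodate(bh))
		return bh;
	brelse(bh);			/* read failed */
	return NULL;
}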
@@ -1525,13 +1496,27 @@
* Use gfp() for the hash table to decrease TLB misses, use
* SLAB cache for buffer heads.
*/
-void __init buffer_init(void)
+void __init buffer_init(unsigned long memory_size)
{
- int order = 5; /* Currently maximum order.. */
+ int order;
unsigned int nr_hash;
- nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct buffer_head *);
- hash_table = (struct buffer_head **) __get_free_pages(GFP_ATOMIC, order);
+ /* we need to guess at the right sort of size for a buffer cache.
+ the heuristic from working with large databases and getting
+ fsync times (ext2) manageable, is the following */
+
+ memory_size >>= 20;
+ for (order = 5; (1UL << order) < memory_size; order++);
+
+ /* try to allocate something until we get it or we're asking
+ for something that is really too small */
+
+ do {
+ nr_hash = (1UL << order) * PAGE_SIZE /
+ sizeof(struct buffer_head *);
+ hash_table = (struct buffer_head **)
+ __get_free_pages(GFP_ATOMIC, order);
+ } while (hash_table == NULL && --order > 4);
if (!hash_table)
panic("Failed to allocate buffer hash table\n");
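
The new rule works out to roughly one page of hash table per megabyte of RAM, rounded up to a power of two with a floor of order 5 (32 pages), and falls back to smaller orders if the allocation fails. The sizing arithmetic can be tried in isolation; a standalone demo assuming i386-style 4 KB pages:

#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumption: 4 KB pages */

int main(void)
{
	unsigned long memory_size = 512UL << 20;	/* pretend 512 MB RAM */
	unsigned long nr_hash;
	int order;

	memory_size >>= 20;	/* bytes -> megabytes */
	for (order = 5; (1UL << order) < memory_size; order++)
		;

	/* one bucket per pointer slot in the allocated pages */
	nr_hash = (1UL << order) * PAGE_SIZE / sizeof(void *);
	printf("order %d: %lu KB table, %lu buckets\n",
	       order, (1UL << order) * PAGE_SIZE >> 10, nr_hash);
	return 0;
}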