patch-2.2.8 linux/mm/filemap.c
- Lines: 484
- Date: Tue May 11 08:51:13 1999
- Orig file: v2.2.7/linux/mm/filemap.c
- Orig date: Tue Mar 23 14:35:48 1999
diff -u --recursive --new-file v2.2.7/linux/mm/filemap.c linux/mm/filemap.c
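
The theme of this patch: filemap.c stops calling the raw page allocator (__get_free_page, __free_page, free_page) and stops using PAGE_SIZE/PAGE_MASK and mem_map + MAP_NR() directly, going through a page-cache wrapper layer instead (page_cache_alloc, page_cache_release, page_cache_free, page_cache_entry, PAGE_CACHE_SIZE/PAGE_CACHE_MASK/PAGE_CACHE_SHIFT). On 2.2 the wrappers are presumably thin aliases; a plausible sketch of the definitions, inferred from the substitutions below rather than copied from the tree:

    /* Sketch only -- inferred from the call sites in this patch; the
     * real definitions live in include/linux/pagemap.h and may differ. */
    #define PAGE_CACHE_SHIFT        PAGE_SHIFT
    #define PAGE_CACHE_SIZE         PAGE_SIZE
    #define PAGE_CACHE_MASK         PAGE_MASK

    #define page_cache_alloc()      __get_free_page(GFP_USER)
    #define page_cache_free(addr)   free_page(addr)
    #define page_cache_release(pg)  __free_page(pg)
    #define page_cache_entry(addr)  (mem_map + MAP_NR(addr))

Funnelling every call site through one set of macros means the page-cache granularity can later be decoupled from the hardware page size without touching this file again.
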
@@ -34,12 +34,6 @@
unsigned long page_cache_size = 0;
struct page * page_hash_table[PAGE_HASH_SIZE];
-/*
- * Simple routines for both non-shared and shared mappings.
- */
-
-#define release_page(page) __free_page((page))
-
/*
* Define a request structure for outstanding page write requests
* to the background page io daemon
@@ -83,7 +77,7 @@
page->prev = NULL;
remove_page_from_hash_queue(page);
page->inode = NULL;
- __free_page(page);
+ page_cache_release(page);
continue;
}
}
@@ -115,15 +109,15 @@
page->prev = NULL;
remove_page_from_hash_queue(page);
page->inode = NULL;
- __free_page(page);
+ page_cache_release(page);
continue;
}
p = &page->next;
offset = start - offset;
/* partial truncate, clear end of page */
- if (offset < PAGE_SIZE) {
+ if (offset < PAGE_CACHE_SIZE) {
unsigned long address = page_address(page);
- memset((void *) (offset + address), 0, PAGE_SIZE - offset);
+ memset((void *) (offset + address), 0, PAGE_CACHE_SIZE - offset);
flush_page_to_ram(address);
}
}
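
A worked example of the partial-truncate branch (4K page-cache pages assumed): truncating a file to start = 10000 while visiting the page at file offset 8192 gives offset = 10000 - 8192 = 1808, which is below PAGE_CACHE_SIZE, so the memset() clears the PAGE_CACHE_SIZE - offset = 2288 bytes from byte 1808 to the end of the page. Pages wholly past the truncation point take the earlier branch and are dropped with page_cache_release().
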
@@ -136,7 +130,7 @@
{
remove_page_from_hash_queue(page);
remove_page_from_inode_queue(page);
- __free_page(page);
+ page_cache_release(page);
}
int shrink_mmap(int priority, int gfp_mask)
@@ -226,9 +220,9 @@
{
unsigned long offset, len;
- offset = (pos & ~PAGE_MASK);
- pos = pos & PAGE_MASK;
- len = PAGE_SIZE - offset;
+ offset = (pos & ~PAGE_CACHE_MASK);
+ pos = pos & PAGE_CACHE_MASK;
+ len = PAGE_CACHE_SIZE - offset;
do {
struct page * page;
@@ -238,13 +232,13 @@
if (page) {
wait_on_page(page);
memcpy((void *) (offset + page_address(page)), buf, len);
- release_page(page);
+ page_cache_release(page);
}
count -= len;
buf += len;
- len = PAGE_SIZE;
+ len = PAGE_CACHE_SIZE;
offset = 0;
- pos += PAGE_SIZE;
+ pos += PAGE_CACHE_SIZE;
} while (count);
}
@@ -271,10 +265,10 @@
struct page * page;
struct page ** hash;
- offset &= PAGE_MASK;
+ offset &= PAGE_CACHE_MASK;
switch (page_cache) {
case 0:
- page_cache = __get_free_page(GFP_USER);
+ page_cache = page_cache_alloc();
if (!page_cache)
break;
default:
@@ -286,12 +280,12 @@
/*
* Ok, add the new page to the hash-queues...
*/
- page = mem_map + MAP_NR(page_cache);
+ page = page_cache_entry(page_cache);
add_to_page_cache(page, inode, offset, hash);
inode->i_op->readpage(file, page);
page_cache = 0;
}
- release_page(page);
+ page_cache_release(page);
}
return page_cache;
}
@@ -413,7 +407,7 @@
* performances.
* Reasonable means, in this context, not too large but not too small.
* The actual maximum value is:
- * MAX_READAHEAD + PAGE_SIZE = 76k if CONFIG_READA_SMALL is undefined
+ * MAX_READAHEAD + PAGE_CACHE_SIZE = 76k if CONFIG_READA_SMALL is undefined
* and 32K if defined (4K page size assumed).
*
* Asynchronous read-ahead benefits:
@@ -440,7 +434,7 @@
* ONE seems to be the only reasonable value.
* - The total memory pool usage for the file access stream.
* This maximum memory usage is implicitly 2 IO read chunks:
- * 2*(MAX_READAHEAD + PAGE_SIZE) = 156K if CONFIG_READA_SMALL is undefined,
+ * 2*(MAX_READAHEAD + PAGE_CACHE_SIZE) = 156K if CONFIG_READA_SMALL is undefined,
* 64k if defined (4K page size assumed).
*/
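
Working backwards from the figures in these comments (4K pages assumed): the 76k chunk implies MAX_READAHEAD = 72K, i.e. 18 pages, since 18 * 4K + 4K = 76K; the 32K CONFIG_READA_SMALL figure likewise implies MAX_READAHEAD = 28K (7 pages), since 7 * 4K + 4K = 32K.
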
@@ -459,7 +453,7 @@
unsigned long raend;
int max_readahead = get_max_readahead(inode);
- raend = filp->f_raend & PAGE_MASK;
+ raend = filp->f_raend & PAGE_CACHE_MASK;
max_ahead = 0;
/*
@@ -476,7 +470,7 @@
if (raend < inode->i_size)
max_ahead = filp->f_ramax;
filp->f_rawin = 0;
- filp->f_ralen = PAGE_SIZE;
+ filp->f_ralen = PAGE_CACHE_SIZE;
if (!max_ahead) {
filp->f_raend = ppos + filp->f_ralen;
filp->f_rawin += filp->f_ralen;
@@ -491,17 +485,17 @@
* it is the moment to try to read ahead asynchronously.
* We will later force unplug device in order to force asynchronous read IO.
*/
- else if (reada_ok && filp->f_ramax && raend >= PAGE_SIZE &&
+ else if (reada_ok && filp->f_ramax && raend >= PAGE_CACHE_SIZE &&
ppos <= raend && ppos + filp->f_ralen >= raend) {
/*
* Add ONE page to max_ahead in order to try to have about the same IO max size
- * as synchronous read-ahead (MAX_READAHEAD + 1)*PAGE_SIZE.
+ * as synchronous read-ahead (MAX_READAHEAD + 1)*PAGE_CACHE_SIZE.
* Compute the position of the last page we have tried to read in order to
* begin to read ahead just at the next page.
*/
- raend -= PAGE_SIZE;
+ raend -= PAGE_CACHE_SIZE;
if (raend < inode->i_size)
- max_ahead = filp->f_ramax + PAGE_SIZE;
+ max_ahead = filp->f_ramax + PAGE_CACHE_SIZE;
if (max_ahead) {
filp->f_rawin = filp->f_ralen;
@@ -516,7 +510,7 @@
*/
ahead = 0;
while (ahead < max_ahead) {
- ahead += PAGE_SIZE;
+ ahead += PAGE_CACHE_SIZE;
page_cache = try_to_read_ahead(filp, raend + ahead,
page_cache);
}
@@ -538,7 +532,7 @@
filp->f_ralen += ahead;
filp->f_rawin += filp->f_ralen;
- filp->f_raend = raend + ahead + PAGE_SIZE;
+ filp->f_raend = raend + ahead + PAGE_CACHE_SIZE;
filp->f_ramax += filp->f_ramax;
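
Illustrative numbers for this bookkeeping (4K pages assumed): if the chunk began at raend = 256K and the loop above issued ahead = 72K of read-ahead, the window end becomes f_raend = 256K + 72K + 4K = 332K, f_rawin grows by the chunk length, and f_ramax doubles; presumably f_ramax is then clamped against the max_readahead obtained from get_max_readahead() shortly after this point (not visible in the hunk).
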
@@ -590,7 +584,7 @@
page_cache = 0;
pos = *ppos;
- pgpos = pos & PAGE_MASK;
+ pgpos = pos & PAGE_CACHE_MASK;
/*
* If the current position is outside the previous read-ahead window,
* we reset the current read-ahead context and set read ahead max to zero
@@ -614,12 +608,12 @@
* Then, at least MIN_READAHEAD if read ahead is ok,
* and at most MAX_READAHEAD in all cases.
*/
- if (pos + desc->count <= (PAGE_SIZE >> 1)) {
+ if (pos + desc->count <= (PAGE_CACHE_SIZE >> 1)) {
filp->f_ramax = 0;
} else {
unsigned long needed;
- needed = ((pos + desc->count) & PAGE_MASK) - pgpos;
+ needed = ((pos + desc->count) & PAGE_CACHE_MASK) - pgpos;
if (filp->f_ramax < needed)
filp->f_ramax = needed;
@@ -639,8 +633,8 @@
/*
* Try to find the data in the page cache..
*/
- hash = page_hash(inode, pos & PAGE_MASK);
- page = __find_page(inode, pos & PAGE_MASK, *hash);
+ hash = page_hash(inode, pos & PAGE_CACHE_MASK);
+ page = __find_page(inode, pos & PAGE_CACHE_MASK, *hash);
if (!page)
goto no_cached_page;
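
page_hash() selects a bucket from the inode and the page-aligned offset, and __find_page() walks that bucket's collision chain. A hypothetical reconstruction of the lookup, using the 2.2 struct page field names (not copied from the tree):

    static inline struct page * sketch_find_page(struct inode * inode,
                                                 unsigned long offset,
                                                 struct page * bucket)
    {
        struct page * p;

        /* Walk the hash chain for a page of this inode at this offset. */
        for (p = bucket; p; p = p->next_hash)
            if (p->inode == inode && p->offset == offset)
                break;
        if (p)
            atomic_inc(&p->count);  /* caller drops it via page_cache_release() */
        return p;
    }
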
@@ -653,7 +647,7 @@
* the page has been rewritten.
*/
if (PageUptodate(page) || PageLocked(page))
- page_cache = generic_file_readahead(reada_ok, filp, inode, pos & PAGE_MASK, page, page_cache);
+ page_cache = generic_file_readahead(reada_ok, filp, inode, pos & PAGE_CACHE_MASK, page, page_cache);
else if (reada_ok && filp->f_ramax > MIN_READAHEAD)
filp->f_ramax = MIN_READAHEAD;
@@ -670,8 +664,8 @@
{
unsigned long offset, nr;
- offset = pos & ~PAGE_MASK;
- nr = PAGE_SIZE - offset;
+ offset = pos & ~PAGE_CACHE_MASK;
+ nr = PAGE_CACHE_SIZE - offset;
if (nr > inode->i_size - pos)
nr = inode->i_size - pos;
@@ -684,7 +678,7 @@
*/
nr = actor(desc, (const char *) (page_address(page) + offset), nr);
pos += nr;
- release_page(page);
+ page_cache_release(page);
if (nr && desc->count)
continue;
break;
@@ -696,7 +690,7 @@
* page..
*/
if (!page_cache) {
- page_cache = __get_free_page(GFP_USER);
+ page_cache = page_cache_alloc();
/*
* That could have slept, so go around to the
* very beginning..
@@ -710,9 +704,9 @@
/*
* Ok, add the new page to the hash-queues...
*/
- page = mem_map + MAP_NR(page_cache);
+ page = page_cache_entry(page_cache);
page_cache = 0;
- add_to_page_cache(page, inode, pos & PAGE_MASK, hash);
+ add_to_page_cache(page, inode, pos & PAGE_CACHE_MASK, hash);
/*
* Error handling is tricky. If we get a read error,
@@ -737,7 +731,7 @@
if (!error)
goto found_page;
desc->error = error;
- release_page(page);
+ page_cache_release(page);
break;
}
@@ -756,7 +750,7 @@
error = -EIO; /* Some unspecified error occurred.. */
}
desc->error = error;
- release_page(page);
+ page_cache_release(page);
break;
}
}
@@ -764,7 +758,7 @@
*ppos = pos;
filp->f_reada = 1;
if (page_cache)
- free_page(page_cache);
+ page_cache_free(page_cache);
UPDATE_ATIME(inode);
}
@@ -962,7 +956,7 @@
* extra page -- better to overlap the allocation with the I/O.
*/
if (no_share && !new_page) {
- new_page = __get_free_page(GFP_USER);
+ new_page = page_cache_alloc();
if (!new_page)
goto failure;
}
@@ -984,7 +978,7 @@
* of any potential extra pages.
*/
if (new_page)
- free_page(new_page);
+ page_cache_free(new_page);
flush_page_to_ram(old_page);
return old_page;
@@ -995,7 +989,7 @@
*/
copy_page(new_page, old_page);
flush_page_to_ram(new_page);
- release_page(page);
+ page_cache_release(page);
return new_page;
no_cached_page:
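
For orientation, the fault-path tail that the two hunks above touch, reassembled from the visible context (the no_share test itself is outside this diff): shared faults hand the page-cache page straight to the mm layer, while private (no_share) faults copy it so later writes cannot dirty the cache:

    if (!no_share) {
        if (new_page)
            page_cache_free(new_page);  /* speculative spare page unused */
        flush_page_to_ram(old_page);
        return old_page;                /* map the cache page directly */
    }
    copy_page(new_page, old_page);      /* private mapping gets its own copy */
    flush_page_to_ram(new_page);
    page_cache_release(page);           /* drop the page-cache reference */
    return new_page;
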
@@ -1003,14 +997,14 @@
* Try to read in an entire cluster at once.
*/
reada = offset;
- reada >>= PAGE_SHIFT + page_cluster;
- reada <<= PAGE_SHIFT + page_cluster;
+ reada >>= PAGE_CACHE_SHIFT + page_cluster;
+ reada <<= PAGE_CACHE_SHIFT + page_cluster;
- for (i = 1 << page_cluster; i > 0; --i, reada += PAGE_SIZE)
+ for (i = 1 << page_cluster; i > 0; --i, reada += PAGE_CACHE_SIZE)
new_page = try_to_read_ahead(file, reada, new_page);
if (!new_page)
- new_page = __get_free_page(GFP_USER);
+ new_page = page_cache_alloc();
if (!new_page)
goto no_page;
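
The shift pair rounds the faulting offset down to a read-ahead cluster boundary. With illustrative values PAGE_CACHE_SHIFT = 12 and page_cluster = 4, a cluster is 1 << 16 = 64K, so a fault at offset 0x2b000 yields reada = 0x20000, and the loop then reads 1 << page_cluster = 16 pages from there. A hypothetical helper mirroring the computation:

    /* Illustrative only -- same arithmetic as the shift pair above. */
    static inline unsigned long cluster_start(unsigned long offset)
    {
        unsigned int shift = PAGE_CACHE_SHIFT + page_cluster;
        return (offset >> shift) << shift;  /* round down to cluster start */
    }
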
@@ -1027,7 +1021,7 @@
/*
* Now, create a new page-cache page from the page we got
*/
- page = mem_map + MAP_NR(new_page);
+ page = page_cache_entry(new_page);
new_page = 0;
add_to_page_cache(page, inode, offset, hash);
@@ -1061,9 +1055,9 @@
* mm layer so, possibly freeing the page cache page first.
*/
failure:
- release_page(page);
+ page_cache_release(page);
if (new_page)
- free_page(new_page);
+ page_cache_free(new_page);
no_page:
return 0;
}
@@ -1166,7 +1160,7 @@
set_pte(ptep, pte_mkclean(pte));
flush_tlb_page(vma, address);
page = pte_page(pte);
- atomic_inc(&mem_map[MAP_NR(page)].count);
+ atomic_inc(&page_cache_entry(page)->count);
} else {
if (pte_none(pte))
return 0;
@@ -1179,12 +1173,12 @@
}
page = pte_page(pte);
if (!pte_dirty(pte) || flags == MS_INVALIDATE) {
- free_page(page);
+ page_cache_free(page);
return 0;
}
}
error = filemap_write_page(vma, address - vma->vm_start + vma->vm_offset, page, 1);
- free_page(page);
+ page_cache_free(page);
return error;
}
@@ -1492,9 +1486,9 @@
* Try to find the page in the cache. If it isn't there,
* allocate a free page.
*/
- offset = (pos & ~PAGE_MASK);
- pgpos = pos & PAGE_MASK;
- bytes = PAGE_SIZE - offset;
+ offset = (pos & ~PAGE_CACHE_MASK);
+ pgpos = pos & PAGE_CACHE_MASK;
+ bytes = PAGE_CACHE_SIZE - offset;
if (bytes > count)
bytes = count;
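
Worked numbers for the first loop iteration (4K pages assumed): a write of count = 6000 at pos = 5000 gives offset = 5000 & ~PAGE_CACHE_MASK = 904, pgpos = 4096, and bytes = 4096 - 904 = 3192; the next iteration then starts page-aligned at pos = 8192 and copies the remaining 2808 bytes.
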
@@ -1502,13 +1496,13 @@
page = __find_page(inode, pgpos, *hash);
if (!page) {
if (!page_cache) {
- page_cache = __get_free_page(GFP_USER);
+ page_cache = page_cache_alloc();
if (page_cache)
continue;
status = -ENOMEM;
break;
}
- page = mem_map + MAP_NR(page_cache);
+ page = page_cache_entry(page_cache);
add_to_page_cache(page, inode, pgpos, hash);
page_cache = 0;
}
@@ -1530,7 +1524,7 @@
/* Mark it unlocked again and drop the page.. */
clear_bit(PG_locked, &page->flags);
wake_up(&page->wait);
- __free_page(page);
+ page_cache_release(page);
if (status < 0)
break;
@@ -1545,7 +1539,7 @@
inode->i_size = pos;
if (page_cache)
- free_page(page_cache);
+ page_cache_free(page_cache);
out:
return written ? written : status;
}
@@ -1573,10 +1567,11 @@
if (!page) {
if (!new)
goto out;
- page_cache = get_free_page(GFP_USER);
+ page_cache = page_cache_alloc();
if (!page_cache)
goto out;
- page = mem_map + MAP_NR(page_cache);
+ clear_page(page_cache);
+ page = page_cache_entry(page_cache);
add_to_page_cache(page, inode, offset, hash);
}
if (atomic_read(&page->count) != 2)
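
The added clear_page() compensates for an allocator semantics difference: the old get_free_page() handed back zeroed memory, whereas page_cache_alloc() (plausibly a bare __get_free_page() wrapper, per the sketch near the top) does not, so get_cached_page() zeroes explicitly to keep its old contract for freshly created pages:

    /* Before (2.2.7): allocation implied zeroing. */
    page_cache = get_free_page(GFP_USER);   /* returns a zero-filled page */

    /* After (2.2.8): allocation and zeroing are separate steps. */
    page_cache = page_cache_alloc();        /* contents undefined */
    if (page_cache)
        clear_page(page_cache);             /* restore the zero-fill guarantee */
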
@@ -1596,7 +1591,7 @@
*/
void put_cached_page(unsigned long addr)
{
- struct page * page = mem_map + MAP_NR(addr);
+ struct page * page = page_cache_entry(addr);
if (!test_bit(PG_locked, &page->flags))
printk("put_cached_page: page not locked!\n");
@@ -1605,7 +1600,7 @@
atomic_read(&page->count));
clear_bit(PG_locked, &page->flags);
wake_up(&page->wait);
- __free_page(page);
+ page_cache_release(page);
}
@@ -1637,7 +1632,7 @@
{
struct pio_request *p;
- atomic_inc(&mem_map[MAP_NR(page)].count);
+ atomic_inc(&page_cache_entry(page)->count);
/*
* We need to allocate without causing any recursive IO in the
@@ -1726,7 +1721,7 @@
(const char *) p->page, p->offset);
up(&inode->i_sem);
fput(p->file);
- free_page(p->page);
+ page_cache_free(p->page);
kmem_cache_free(pio_request_cache, p);
}
}
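
make_pio_request() (first pio hunk above) takes an extra reference on the page before queueing it, and the kpiod completion path (second hunk) drops it with page_cache_free() after the write, so the page cannot be reclaimed while a request is in flight. A hypothetical reconstruction of the request structure these hunks operate on (the real definition sits earlier in filemap.c, outside this diff):

    struct pio_request {
        struct pio_request * next;  /* singly-linked request queue */
        struct file * file;         /* target file, pinned for the request */
        unsigned long offset;       /* file offset to write at */
        unsigned long page;         /* virtual address of the data page */
    };
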