patch-2.4.13 linux/mm/filemap.c

diff -u --recursive --new-file v2.4.12/linux/mm/filemap.c linux/mm/filemap.c
@@ -667,8 +667,7 @@
 static int FASTCALL(page_cache_read(struct file * file, unsigned long offset));
 static int page_cache_read(struct file * file, unsigned long offset)
 {
-	struct inode *inode = file->f_dentry->d_inode;
-	struct address_space *mapping = inode->i_mapping;
+	struct address_space *mapping = file->f_dentry->d_inode->i_mapping;
 	struct page **hash = page_hash(mapping, offset);
 	struct page *page; 
 
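The hunk above is a pure simplification: the intermediate inode local was only used to reach i_mapping, so the two dereferences are collapsed into one. This pattern, together with the mapping->host back-pointer used by the hunks below, leans on the 2.4 linkage between inodes and address spaces, sketched here with only the relevant fields (the real structs in include/linux/fs.h carry many more):

struct inode;

struct address_space {
	struct inode		*host;		/* owning inode (or block device inode) */
	/* ... */
};

struct inode {
	struct address_space	*i_mapping;	/* where this inode's pages live */
	struct address_space	i_data;		/* the inode's own page cache */
	/* ... */
};

For an ordinary file, i_mapping points at the inode's own i_data, so mapping->host gets back to the inode without keeping a second local around.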
@@ -1521,6 +1520,53 @@
 	return retval;
 }
 
+static ssize_t do_readahead(struct file *file, unsigned long index, unsigned long nr)
+{
+	struct address_space *mapping = file->f_dentry->d_inode->i_mapping;
+	unsigned long max;
+
+	if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage)
+		return -EINVAL;
+
+	/* Limit it to the size of the file.. */
+	max = (mapping->host->i_size + ~PAGE_CACHE_MASK) >> PAGE_CACHE_SHIFT;
+	if (index > max)
+		return 0;
+	max -= index;
+	if (nr > max)
+		nr = max;
+
+	/* And limit it to a sane percentage of the inactive list.. */
+	max = nr_inactive_pages / 2;
+	if (nr > max)
+		nr = max;
+
+	while (nr) {
+		page_cache_read(file, index);
+		index++;
+		nr--;
+	}
+	return 0;
+}
+
+asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
+{
+	ssize_t ret;
+	struct file *file;
+
+	ret = -EBADF;
+	file = fget(fd);
+	if (file) {
+		if (file->f_mode & FMODE_READ) {
+			unsigned long start = offset >> PAGE_CACHE_SHIFT;
+			unsigned long len = (count + ((long)offset & ~PAGE_CACHE_MASK)) >> PAGE_CACHE_SHIFT;
+			ret = do_readahead(file, start, len);
+		}
+		fput(file);
+	}
+	return ret;
+}
+
 /*
  * Read-ahead and flush behind for MADV_SEQUENTIAL areas.  Since we are
  * sure this is sequential access, we don't need a flexible read-ahead
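The new sys_readahead() system call populates the page cache for a byte range of an open file. do_readahead() refuses mappings without a readpage operation (-EINVAL), clamps the request to the end of the file (adding ~PAGE_CACHE_MASK, which equals PAGE_CACHE_SIZE - 1, before the shift rounds i_size up to whole pages) and to half of the inactive list so a single call cannot wipe out the page cache, then starts reads one page at a time via page_cache_read(). Note that the byte-to-page conversion in sys_readahead() truncates, so a trailing partial page is not read ahead, and a request entirely past end-of-file succeeds with 0. A minimal userspace sketch, assuming the glibc readahead(3) wrapper (available in later glibc with _GNU_SOURCE; on a 2.4-era libc one would call syscall() with the architecture's __NR_readahead):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Ask the kernel to start pulling the first megabyte into the
	 * page cache; the reads are queued, not waited for. */
	if (readahead(fd, 0, 1024 * 1024) < 0)
		perror("readahead");
	close(fd);
	return 0;
}

A descriptor opened without read permission fails with -EBADF, matching the FMODE_READ test above.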
@@ -1589,12 +1635,13 @@
 {
 	int error;
 	struct file *file = area->vm_file;
-	struct inode *inode = file->f_dentry->d_inode;
-	struct address_space *mapping = inode->i_mapping;
+	struct address_space *mapping = file->f_dentry->d_inode->i_mapping;
+	struct inode *inode = mapping->host;
 	struct page *page, **hash, *old_page;
-	unsigned long size, pgoff;
+	unsigned long size, pgoff, endoff;
 
 	pgoff = ((address - area->vm_start) >> PAGE_CACHE_SHIFT) + area->vm_pgoff;
+	endoff = ((area->vm_end - area->vm_start) >> PAGE_CACHE_SHIFT) + area->vm_pgoff;
 
 retry_all:
 	/*
@@ -1605,6 +1652,10 @@
 	if ((pgoff >= size) && (area->vm_mm == current->mm))
 		return NULL;
 
+	/* The "size" of the file, as far as mmap is concerned, isn't bigger than the mapping */
+	if (size > endoff)
+		size = endoff;
+
 	/*
 	 * Do we have something in the page cache already?
 	 */
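The two filemap_nopage() hunks (this one and the previous) work together: endoff is the file page index just past the end of the VMA, and clamping size to it keeps the fault path, including its read-around, from instantiating file pages beyond the region that was actually mapped. A sketch of the arithmetic as a standalone program, with hypothetical values:

#include <stdio.h>

#define PAGE_CACHE_SHIFT 12	/* assumption: 4 KB pages, as on i386 */

int main(void)
{
	/* Hypothetical three-page mapping starting at file page 2. */
	unsigned long vm_start = 0x40000000, vm_end = 0x40003000, vm_pgoff = 2;
	unsigned long address = 0x40002000;	/* fault in the last mapped page */

	unsigned long pgoff  = ((address - vm_start) >> PAGE_CACHE_SHIFT) + vm_pgoff;
	unsigned long endoff = ((vm_end - vm_start) >> PAGE_CACHE_SHIFT) + vm_pgoff;

	/* Prints "pgoff=4 endoff=5": file pages 2..4 are mapped, so any
	 * file size past page 5 is clamped down to 5 for this fault. */
	printf("pgoff=%lu endoff=%lu\n", pgoff, endoff);
	return 0;
}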
@@ -1851,15 +1902,14 @@
 
 int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
 {
-	struct inode *inode = file->f_dentry->d_inode;
+	struct address_space *mapping = file->f_dentry->d_inode->i_mapping;
+	struct inode *inode = mapping->host;
 
 	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) {
-		if (!inode->i_mapping->a_ops->writepage)
+		if (!mapping->a_ops->writepage)
 			return -EINVAL;
 	}
-	if (!inode->i_sb || !S_ISREG(inode->i_mode))
-		return -EACCES;
-	if (!inode->i_mapping->a_ops->readpage)
+	if (!mapping->a_ops->readpage)
 		return -ENOEXEC;
 	UPDATE_ATIME(inode);
 	vma->vm_ops = &generic_file_vm_ops;
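generic_file_mmap() now takes everything it needs from the address_space: a shared writable mapping requires a writepage operation (-EINVAL otherwise), any mapping requires readpage (-ENOEXEC otherwise), and the old i_sb/S_ISREG test returning -EACCES is dropped, since this function is only ever installed as the mmap operation of regular files, which makes the test redundant here. A hedged sketch of that wiring, modelled on a 2.4 filesystem's file_operations (ext2-style, names shortened):

#include <linux/fs.h>

static struct file_operations example_file_operations = {
	read:	generic_file_read,	/* generic page cache read */
	write:	generic_file_write,	/* generic page cache write */
	mmap:	generic_file_mmap,	/* fails -ENOEXEC without a readpage op */
	open:	generic_file_open,	/* O_LARGEFILE check */
};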
@@ -2309,7 +2359,7 @@
 	unsigned long pgoff)
 {
 	unsigned char present = 0;
-	struct address_space * as = &vma->vm_file->f_dentry->d_inode->i_data;
+	struct address_space * as = vma->vm_file->f_dentry->d_inode->i_mapping;
 	struct page * page, ** hash = page_hash(as, pgoff);
 
 	spin_lock(&pagecache_lock);
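The last hunk fixes the mincore() helper: it hashed into &inode->i_data, the address_space embedded in the inode, but a file's pages live wherever i_mapping points, which is only usually the inode's own i_data (filesystems such as Coda redirect it elsewhere), so the lookup must follow i_mapping like the rest of this file now does. A minimal userspace sketch exercising the syscall this helper serves:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	long psz = sysconf(_SC_PAGESIZE);
	unsigned char vec[4];
	void *map;
	int fd, i;

	if (argc != 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0)
		return 1;
	/* Map four pages of the file and ask which are already resident. */
	map = mmap(NULL, 4 * psz, PROT_READ, MAP_SHARED, fd, 0);
	if (map == MAP_FAILED)
		return 1;
	if (mincore(map, 4 * psz, vec) == 0)
		for (i = 0; i < 4; i++)
			printf("page %d: %s\n", i, (vec[i] & 1) ? "resident" : "absent");
	munmap(map, 4 * psz);
	close(fd);
	return 0;
}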
