patch-2.2.2 linux/fs/inode.c
- Lines: 222
- Date: Mon Feb 22 11:47:42 1999
- Orig file: v2.2.1/linux/fs/inode.c
- Orig date: Mon Jan 25 17:44:34 1999
diff -u --recursive --new-file v2.2.1/linux/fs/inode.c linux/fs/inode.c
@@ -62,9 +62,8 @@
struct {
int nr_inodes;
int nr_free_inodes;
- int preshrink; /* pre-shrink dcache? */
- int dummy[4];
-} inodes_stat = {0, 0, 0,};
+ int dummy[5];
+} inodes_stat = {0, 0,};
int max_inodes;
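The detail worth noticing in the hunk above is that dummy[] grows from four entries to five as preshrink goes away, so the structure stays seven ints wide; presumably that preserves the layout userspace sees through the inode-state sysctl export (an assumption on my part, but the size equality is easy to verify):

	/* Size check for the two layouts; the sysctl-stability motive is an
	 * assumption, but the equal sizes are not. */
	#include <assert.h>
	#include <stdio.h>

	struct inodes_stat_old {
		int nr_inodes;
		int nr_free_inodes;
		int preshrink;
		int dummy[4];
	};

	struct inodes_stat_new {
		int nr_inodes;
		int nr_free_inodes;
		int dummy[5];
	};

	int main(void)
	{
		/* both are 7 ints, so readers of the exported struct see no change */
		assert(sizeof(struct inodes_stat_old) == sizeof(struct inodes_stat_new));
		printf("both layouts: %zu bytes\n", sizeof(struct inodes_stat_new));
		return 0;
	}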
@@ -196,6 +195,19 @@
}
/*
+ * Called with the spinlock already held..
+ */
+static void sync_all_inodes(void)
+{
+ struct super_block * sb = sb_entry(super_blocks.next);
+ for (; sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.next)) {
+ if (!sb->s_dev)
+ continue;
+ sync_list(&sb->s_dirty);
+ }
+}
+
+/*
* Needed by knfsd
*/
void write_inode_now(struct inode *inode)
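The new sync_all_inodes() above walks the global super_blocks list with the inode spinlock held, skipping superblocks without a device (s_dev == 0) and syncing each one's dirty list. The walk leans on the kernel's intrusive-list idiom: sb_entry() is essentially a list_entry() wrapper that recovers the containing super_block from its embedded s_list node. A minimal userspace sketch of that idiom (the names here are illustrative, not the kernel's):

	/* Miniature of the intrusive-list walk: list_entry() steps back from
	 * a node to its containing structure via offsetof(). */
	#include <stddef.h>
	#include <stdio.h>

	struct list_head {
		struct list_head *next, *prev;
	};

	#define list_entry(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct sb_demo {
		int s_dev;               /* 0 means "no device": the walk skips it */
		struct list_head s_list; /* links this sb into the global list */
	};

	int main(void)
	{
		struct list_head super_blocks = { &super_blocks, &super_blocks };
		struct sb_demo a = { 1, { NULL, NULL } };

		/* link a in right after the list head */
		a.s_list.next = super_blocks.next;
		a.s_list.prev = &super_blocks;
		super_blocks.next->prev = &a.s_list;
		super_blocks.next = &a.s_list;

		/* the shape of the loop in sync_all_inodes(), minus the locking */
		for (struct list_head *p = super_blocks.next; p != &super_blocks; p = p->next) {
			struct sb_demo *sb = list_entry(p, struct sb_demo, s_list);
			if (!sb->s_dev)
				continue;
			printf("would sync dirty list of sb %d\n", sb->s_dev);
		}
		return 0;
	}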
@@ -232,13 +244,15 @@
/*
* Dispose-list gets a local list, so it doesn't need to
- * worry about list corruption.
+ * worry about list corruption. It releases the inode lock
+ * while clearing the inodes.
*/
static void dispose_list(struct list_head * head)
{
struct list_head *next;
int count = 0;
+ spin_unlock(&inode_lock);
next = head->next;
for (;;) {
struct list_head * tmp = next;
@@ -256,7 +270,6 @@
spin_lock(&inode_lock);
list_splice(head, &inode_unused);
inodes_stat.nr_free_inodes += count;
- spin_unlock(&inode_lock);
}
/*
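The locking convention for dispose_list() flips here: it is now entered with inode_lock held, drops the lock for the slow inode-clearing work, and retakes it before splicing the freed inodes back onto inode_unused, so callers keep their critical section on either side of the call (the invalidate_inodes() hunk below moves its unlock after dispose_list() accordingly). A userspace pthreads analogue of the pattern, with the kernel work stubbed out:

	/* Analogue of the drop/retake pattern; a pthread mutex stands in for
	 * the inode spinlock, and the slow work is elided. */
	#include <pthread.h>

	static pthread_mutex_t inode_lock = PTHREAD_MUTEX_INITIALIZER;

	static void dispose_list_demo(void)
	{
		pthread_mutex_unlock(&inode_lock); /* drop: freeing may take a while */
		/* ... clear each inode outside the lock ... */
		pthread_mutex_lock(&inode_lock);   /* retake before touching the list */
		/* ... splice onto the unused list, bump nr_free_inodes ... */
	}

	int main(void)
	{
		pthread_mutex_lock(&inode_lock);   /* caller's critical section */
		dispose_list_demo();               /* returns with the lock still held */
		pthread_mutex_unlock(&inode_lock);
		return 0;
	}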
@@ -305,65 +318,53 @@
spin_lock(&inode_lock);
busy = invalidate_list(&inode_in_use, sb, &throw_away);
busy |= invalidate_list(&sb->s_dirty, sb, &throw_away);
- spin_unlock(&inode_lock);
-
dispose_list(&throw_away);
+ spin_unlock(&inode_lock);
return busy;
}
/*
* This is called with the inode lock held. It searches
- * the in-use for the specified number of freeable inodes.
- * Freeable inodes are moved to a temporary list and then
- * placed on the unused list by dispose_list.
+ * the in-use list for freeable inodes, which are moved to a
+ * temporary list and then placed on the unused list by
+ * dispose_list.
+ *
+ * We don't expect to have to call this very often.
*
- * Note that we do not expect to have to search very hard:
- * the freeable inodes will be at the old end of the list.
- *
- * N.B. The spinlock is released to call dispose_list.
+ * N.B. The spinlock is released during the call to
+ * dispose_list.
*/
#define CAN_UNUSE(inode) \
- (((inode)->i_count == 0) && \
- (!(inode)->i_state))
+ (((inode)->i_count | (inode)->i_state) == 0)
+#define INODE(entry) (list_entry(entry, struct inode, i_list))
-static int free_inodes(int goal)
+static int free_inodes(void)
{
- struct list_head *tmp, *head = &inode_in_use;
- LIST_HEAD(freeable);
- int found = 0, depth = goal << 1;
+ struct list_head list, *entry, *freeable = &list;
+ int found = 0;
- while ((tmp = head->prev) != head && depth--) {
- struct inode * inode = list_entry(tmp, struct inode, i_list);
+ INIT_LIST_HEAD(freeable);
+ entry = inode_in_use.next;
+ while (entry != &inode_in_use) {
+ struct list_head *tmp = entry;
+
+ entry = entry->next;
+ if (!CAN_UNUSE(INODE(tmp)))
+ continue;
list_del(tmp);
- if (CAN_UNUSE(inode)) {
- list_del(&inode->i_hash);
- INIT_LIST_HEAD(&inode->i_hash);
- list_add(tmp, &freeable);
- if (++found < goal)
- continue;
- break;
- }
- list_add(tmp, head);
+ list_del(&INODE(tmp)->i_hash);
+ INIT_LIST_HEAD(&INODE(tmp)->i_hash);
+ list_add(tmp, freeable);
+ found = 1;
}
+
if (found) {
- spin_unlock(&inode_lock);
- dispose_list(&freeable);
- spin_lock(&inode_lock);
+ dispose_list(freeable);
+ found = 1; /* silly compiler */
}
- return found;
-}
-static void shrink_dentry_inodes(int goal)
-{
- int found;
-
- spin_unlock(&inode_lock);
- found = select_dcache(goal, 0);
- if (found < goal)
- found = goal;
- prune_dcache(found);
- spin_lock(&inode_lock);
+ return found;
}
/*
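Two idioms in the rewritten free_inodes() deserve a note. The loop saves entry->next before it may list_del() the current node, the standard way to unlink safely mid-iteration. And the new CAN_UNUSE() folds two zero tests into one comparison: for any two integers, (a | b) == 0 exactly when both are zero, since OR can only accumulate set bits. A quick brute-force check of that equivalence:

	/* Exhaustive check over a small range that (a | b) == 0 is the same
	 * test as a == 0 && b == 0. */
	#include <assert.h>

	int main(void)
	{
		for (int a = -8; a <= 8; a++)
			for (int b = -8; b <= 8; b++)
				assert(((a | b) == 0) == (a == 0 && b == 0));
		return 0;
	}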
@@ -373,9 +374,23 @@
*/
static void try_to_free_inodes(int goal)
{
- shrink_dentry_inodes(goal);
- if (!free_inodes(goal))
- shrink_dentry_inodes(goal);
+ /*
+ * First try to just get rid of unused inodes.
+ *
+ * If we can't reach our goal that way, we'll have
+ * to try to shrink the dcache and sync existing
+ * inodes..
+ */
+ free_inodes();
+ goal -= inodes_stat.nr_free_inodes;
+ if (goal > 0) {
+ spin_unlock(&inode_lock);
+ select_dcache(goal, 0);
+ prune_dcache(goal);
+ spin_lock(&inode_lock);
+ sync_all_inodes();
+ free_inodes();
+ }
}
/*
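try_to_free_inodes() is now a two-phase affair: reap the inodes that are already unused, measure the remaining shortfall against nr_free_inodes, and only then pay for the expensive path of pruning the dcache, syncing every dirty inode, and reaping again. A hedged sketch of just that control flow, with the kernel calls stubbed out and the names invented for illustration:

	/* Shape of the cheap-then-expensive fallback; the stubs only mimic
	 * the bookkeeping, not the real reclaim. */
	#include <stdio.h>

	static int nr_free_inodes;            /* stand-in for inodes_stat */

	static void free_unused(void)         /* phase 1: cheap reap */
	{
		nr_free_inodes += 4;          /* pretend a few inodes came free */
	}

	static void shrink_and_sync(int goal) /* phase 2: dcache prune + sync */
	{
		printf("expensive path, still short by %d\n", goal);
	}

	static void try_to_free(int goal)
	{
		free_unused();
		goal -= nr_free_inodes;       /* how far short are we? */
		if (goal > 0) {               /* expensive path only when needed */
			shrink_and_sync(goal);
			free_unused();
		}
	}

	int main(void)
	{
		try_to_free(32);              /* 32 - 4 = 28: takes the slow path */
		return 0;
	}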
@@ -385,7 +400,7 @@
void free_inode_memory(int goal)
{
spin_lock(&inode_lock);
- free_inodes(goal);
+ free_inodes();
spin_unlock(&inode_lock);
}
@@ -403,9 +418,9 @@
/*
* Check whether to restock the unused list.
*/
- if (inodes_stat.preshrink) {
+ if (inodes_stat.nr_inodes > max_inodes) {
struct list_head *tmp;
- try_to_free_inodes(8);
+ try_to_free_inodes(inodes_stat.nr_inodes >> 2);
tmp = inode_unused.next;
if (tmp != &inode_unused) {
inodes_stat.nr_free_inodes--;
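Restocking is now gated directly on nr_inodes > max_inodes instead of the old preshrink flag, and the reclaim goal scales with the pool: a quarter of the current inode count rather than a fixed 8. With nr_inodes at 16384, for instance, the shift works out as follows:

	/* The goal passed to try_to_free_inodes() is a quarter of the pool. */
	#include <stdio.h>

	int main(void)
	{
		int nr_inodes = 16384;                 /* hypothetical count */
		printf("goal = %d\n", nr_inodes >> 2); /* prints: goal = 4096 */
		return 0;
	}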
@@ -436,9 +451,6 @@
*/
inodes_stat.nr_inodes += INODES_PER_PAGE;
inodes_stat.nr_free_inodes += INODES_PER_PAGE - 1;
- inodes_stat.preshrink = 0;
- if (inodes_stat.nr_inodes > max_inodes)
- inodes_stat.preshrink = 1;
return inode;
}
@@ -447,10 +459,9 @@
* the dcache and then try again to free some inodes.
*/
prune_dcache(inodes_stat.nr_inodes >> 2);
- inodes_stat.preshrink = 1;
spin_lock(&inode_lock);
- free_inodes(inodes_stat.nr_inodes >> 2);
+ free_inodes();
{
struct list_head *tmp = inode_unused.next;
if (tmp != &inode_unused) {