patch-2.1.55 linux/fs/inode.c
Next file: linux/fs/isofs/dir.c
Previous file: linux/fs/hpfs/hpfs_fs.c
Back to the patch index
Back to the overall index
- Lines: 177
- Date: Mon Sep 8 22:01:05 1997
- Orig file: v2.1.54/linux/fs/inode.c
- Orig date: Sun Sep 7 13:10:43 1997
diff -u --recursive --new-file v2.1.54/linux/fs/inode.c linux/fs/inode.c
@@ -43,7 +43,10 @@
static struct list_head inode_hashtable[HASH_SIZE];
/*
- * A simple spinlock to protect the list manipulations
+ * A simple spinlock to protect the list manipulations.
+ *
+ * NOTE! You also have to own the lock if you change
+ * the i_state of an inode while it is in use..
*/
spinlock_t inode_lock = SPIN_LOCK_UNLOCKED;
@@ -59,7 +62,16 @@
int max_inodes = NR_INODE;
/*
- * Put the inode on the super block's dirty list
+ * Put the inode on the super block's dirty list.
+ *
+ * CAREFUL! We mark it dirty unconditionally, but
+ * move it onto the dirty list only if it is hashed.
+ * If it was not hashed, it will never be added to
+ * the dirty list even if it is later hashed, as it
+ * will have been marked dirty already.
+ *
+ * In short, make sure you hash any inodes _before_
+ * you start marking them dirty..
*/
void __mark_inode_dirty(struct inode *inode)
{
@@ -67,18 +79,18 @@
if (sb) {
spin_lock(&inode_lock);
- list_del(&inode->i_list);
- list_add(&inode->i_list, &sb->s_dirty);
+ if (!(inode->i_state & I_DIRTY)) {
+ inode->i_state |= I_DIRTY;
+ /* Only add valid (ie hashed) inodes to the dirty list */
+ if (!list_empty(&inode->i_hash)) {
+ list_del(&inode->i_list);
+ list_add(&inode->i_list, &sb->s_dirty);
+ }
+ }
spin_unlock(&inode_lock);
}
}
-static inline void unlock_inode(struct inode *inode)
-{
- clear_bit(I_LOCK, &inode->i_state);
- wake_up(&inode->i_wait);
-}
-
static void __wait_on_inode(struct inode * inode)
{
struct wait_queue wait = { current, NULL };
@@ -86,7 +98,7 @@
add_wait_queue(&inode->i_wait, &wait);
repeat:
current->state = TASK_UNINTERRUPTIBLE;
- if (test_bit(I_LOCK, &inode->i_state)) {
+ if (inode->i_state & I_LOCK) {
schedule();
goto repeat;
}
@@ -96,7 +108,7 @@
static inline void wait_on_inode(struct inode *inode)
{
- if (test_bit(I_LOCK, &inode->i_state))
+ if (inode->i_state & I_LOCK)
__wait_on_inode(inode);
}
@@ -146,31 +158,34 @@
inode->i_sb->s_op->write_inode(inode);
}
-static inline void sync_one(struct list_head *head, struct list_head *clean,
- struct list_head *placement, struct inode *inode)
+static inline void sync_one(struct inode *inode)
{
- list_del(placement);
- if (test_bit(I_LOCK, &inode->i_state)) {
- list_add(placement, head);
+ if (inode->i_state & I_LOCK) {
spin_unlock(&inode_lock);
__wait_on_inode(inode);
+ spin_lock(&inode_lock);
} else {
- list_add(placement, clean);
- clear_bit(I_DIRTY, &inode->i_state);
- set_bit(I_LOCK, &inode->i_state);
+ list_del(&inode->i_list);
+ list_add(&inode->i_list, &inode_in_use);
+
+ /* Set I_LOCK, reset I_DIRTY */
+ inode->i_state ^= I_DIRTY | I_LOCK;
spin_unlock(&inode_lock);
+
write_inode(inode);
- unlock_inode(inode);
+
+ spin_lock(&inode_lock);
+ inode->i_state &= ~I_LOCK;
+ wake_up(&inode->i_wait);
}
- spin_lock(&inode_lock);
}
-static inline void sync_list(struct list_head *head, struct list_head *clean)
+static inline void sync_list(struct list_head *head)
{
struct list_head * tmp;
while ((tmp = head->prev) != head)
- sync_one(head, clean, tmp, list_entry(tmp, struct inode, i_list));
+ sync_one(list_entry(tmp, struct inode, i_list));
}
/*
@@ -192,7 +207,7 @@
if (dev && sb->s_dev != dev)
continue;
- sync_list(&sb->s_dirty, &inode_in_use);
+ sync_list(&sb->s_dirty);
if (dev)
break;
}
@@ -208,9 +223,8 @@
if (sb) {
spin_lock(&inode_lock);
- if (test_bit(I_DIRTY, &inode->i_state))
- sync_one(&sb->s_dirty, &inode_in_use, &inode->i_list,
- inode);
+ if (inode->i_state & I_DIRTY)
+ sync_one(inode);
spin_unlock(&inode_lock);
}
else
@@ -393,7 +407,6 @@
static inline void read_inode(struct inode *inode, struct super_block *sb)
{
sb->s_op->read_inode(inode);
- unlock_inode(inode);
}
struct inode * get_empty_inode(void)
@@ -451,10 +464,24 @@
inode->i_ino = ino;
inode->i_flags = sb->s_flags;
inode->i_count = 1;
- inode->i_state = 1 << I_LOCK;
+ inode->i_state = I_LOCK;
spin_unlock(&inode_lock);
+
clean_inode(inode);
read_inode(inode, sb);
+
+ /*
+ * This is special! We do not need the spinlock
+ * when clearing I_LOCK, because we're guaranteed
+ * that nobody else tries to do anything about the
+ * state of the inode when it is locked, as we
+ * just created it (so there can be no old holders
+ * that haven't tested I_LOCK).
+ *
+ * Verify this some day!
+ */
+ inode->i_state &= ~I_LOCK;
+
return inode;
}
FUNET's LINUX-ADM group, linux-adm@nic.funet.fi
TCL-scripts by Sam Shen, slshen@lbl.gov