DragonFly BSD
DragonFly submit List (threaded) for 2005-04

Re: Convert i_next field in struct inode to a LIST


From: Dheeraj Reddy <dheeraj@xxxxxxxxxxxxxx>
Date: 08 Apr 2005 19:47:19 -0400

This suggestion would require traversing the list twice.

The attached patch is better, imho.

Is it commit-ready yet?

truly,
dheeraj
Index: inode.h
===================================================================
RCS file: /home/dcvs/src/sys/vfs/ufs/inode.h,v
retrieving revision 1.9
diff -u -r1.9 inode.h
--- inode.h	30 Dec 2004 07:01:52 -0000	1.9
+++ inode.h	7 Apr 2005 11:24:18 -0000
@@ -81,7 +81,7 @@
  * active, and is put back when the file is no longer being used.
  */
 struct inode {
-	struct inode	*i_next;/* Hash chain */
+	SLIST_ENTRY(inode) i_hash; /* Hash chain */
 	struct	vnode  *i_vnode;/* Vnode associated with this inode. */
 	struct	vnode  *i_devvp;/* Vnode for block I/O. */
 	uint32_t i_flag;	/* flags, see below */
Index: ufs_ihash.c
===================================================================
RCS file: /home/dcvs/src/sys/vfs/ufs/ufs_ihash.c,v
retrieving revision 1.15
diff -u -r1.15 ufs_ihash.c
--- ufs_ihash.c	20 Jan 2005 18:08:54 -0000	1.15
+++ ufs_ihash.c	8 Apr 2005 15:20:51 -0000
@@ -50,8 +50,11 @@
 static MALLOC_DEFINE(M_UFSIHASH, "UFS ihash", "UFS Inode hash tables");
 /*
  * Structures associated with inode cacheing.
- */
+
 static struct inode **ihashtbl;
+ */
+
+static SLIST_HEAD(ihashhead, inode) *ihashtbl;
 static u_long	ihash;		/* size of hash table - 1 */
 static struct lwkt_token ufs_ihash_token;
 
@@ -79,10 +82,15 @@
 ufs_ihashlookup(dev_t dev, ino_t inum)
 {
 	struct inode *ip;
+	struct ihashhead *ipp;
 	lwkt_tokref ilock;
+
 
 	lwkt_gettoken(&ilock, &ufs_ihash_token);
-	for (ip = *INOHASH(dev, inum); ip; ip = ip->i_next) {
+
+	ipp = INOHASH(dev, inum);
+
+	SLIST_FOREACH(ip, ipp, i_hash) {
 		if (inum == ip->i_number && dev == ip->i_dev)
 			break;
 	}
@@ -108,10 +116,12 @@
 	lwkt_tokref ilock;
 	struct inode *ip;
 	struct vnode *vp;
+	struct ihashhead *ipp;
 
 	lwkt_gettoken(&ilock, &ufs_ihash_token);
+	ipp = INOHASH(dev, inum);
 loop:
-	for (ip = *INOHASH(dev, inum); ip; ip = ip->i_next) {
+	SLIST_FOREACH(ip, ipp, i_hash) {
 		if (inum != ip->i_number || dev != ip->i_dev)
 			continue;
 		vp = ITOV(ip);
@@ -121,7 +131,7 @@
 		 * We must check to see if the inode has been ripped
 		 * out from under us after blocking.
 		 */
-		for (ip = *INOHASH(dev, inum); ip; ip = ip->i_next) {
+		SLIST_FOREACH(ip, ipp, i_hash) {
 			if (inum == ip->i_number && dev == ip->i_dev)
 				break;
 		}
@@ -146,9 +156,11 @@
 {
 	lwkt_tokref ilock;
 	struct inode *ip;
+	struct ihashhead *ipp;
 
 	lwkt_gettoken(&ilock, &ufs_ihash_token);
-	for (ip = *INOHASH(dev, inum); ip; ip = ip->i_next) {
+	ipp = INOHASH(dev, inum);
+	SLIST_FOREACH(ip, ipp, i_hash) {
 		if (inum == ip->i_number && dev == ip->i_dev) {
 			if (ip->i_vnode) {
 			    printf("conflict with vnode %p", ip->i_vnode);
@@ -169,22 +181,21 @@
 int
 ufs_ihashins(struct inode *ip)
 {
-	struct inode **ipp;
 	struct inode *iq;
 	lwkt_tokref ilock;
+	struct ihashhead *ipp;
 
 	KKASSERT((ip->i_flag & IN_HASHED) == 0);
 	lwkt_gettoken(&ilock, &ufs_ihash_token);
 	ipp = INOHASH(ip->i_dev, ip->i_number);
-	while ((iq = *ipp) != NULL) {
-		if (ip->i_dev == iq->i_dev && ip->i_number == iq->i_number) {
+
+	SLIST_FOREACH(iq, ipp, i_hash) {
+		if (ip->i_dev == iq->i_dev && ip->i_number == iq->i_number) {
 			lwkt_reltoken(&ilock);
 			return(EBUSY);
 		}
-		ipp = &iq->i_next;
 	}
-	ip->i_next = NULL;
-	*ipp = ip;
+	SLIST_INSERT_HEAD(ipp, ip, i_hash);
 	ip->i_flag |= IN_HASHED;
 	lwkt_reltoken(&ilock);
 	return(0);
@@ -197,20 +208,29 @@
 ufs_ihashrem(struct inode *ip)
 {
 	lwkt_tokref ilock;
-	struct inode **ipp;
-	struct inode *iq;
+	struct ihashhead *ipp;
 
 	lwkt_gettoken(&ilock, &ufs_ihash_token);
 	if (ip->i_flag & IN_HASHED) {
 		ipp = INOHASH(ip->i_dev, ip->i_number);
-		while ((iq = *ipp) != NULL) {
-			if (ip == iq)
-				break;
-			ipp = &iq->i_next;
+
+/*
+		SLIST_REMOVE(ipp, ip, inode, i_hash);
+*/
+
+		if (SLIST_FIRST(ipp) == (ip)) {
+			SLIST_REMOVE_HEAD((ipp), i_hash);
+		} else {
+			struct inode *iq;
+			iq = SLIST_FIRST(ipp);
+			while (SLIST_NEXT(iq, i_hash) != ip)
+				iq = SLIST_NEXT(iq, i_hash);
+			KKASSERT(SLIST_NEXT(iq, i_hash) == ip);
+			SLIST_NEXT(iq, i_hash) =
+				SLIST_NEXT(SLIST_NEXT(iq, i_hash), i_hash);
 		}
-		KKASSERT(ip == iq);
-		*ipp = ip->i_next;
-		ip->i_next = NULL;
+
+
 		ip->i_flag &= ~IN_HASHED;
 	}
 	lwkt_reltoken(&ilock);
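
For anyone who wants to poke at the macros outside the kernel, here is a
minimal userspace sketch of the same SLIST hash-chain scheme. The names here
(struct toy_inode, toyhashtbl, TOYHASH, NHASH) are made up for the example and
there is no ufs_ihash_token locking; it is only meant to show that
SLIST_INSERT_HEAD()/SLIST_FOREACH() correspond to the new insert and lookup
paths, and that SLIST_REMOVE() from <sys/queue.h> does the same head/interior
removal that the patched ufs_ihashrem() open-codes (the call the patch leaves
commented out).

/*
 * slist_demo.c - toy illustration only, not kernel code.
 * Build with: cc -o slist_demo slist_demo.c
 */
#include <sys/queue.h>
#include <stdio.h>

#define NHASH	16	/* toy table size, must be a power of two */

struct toy_inode {
	SLIST_ENTRY(toy_inode) i_hash;	/* hash chain, replaces i_next */
	unsigned long i_number;
};

static SLIST_HEAD(toyhashhead, toy_inode) toyhashtbl[NHASH];

#define TOYHASH(inum)	(&toyhashtbl[(inum) & (NHASH - 1)])

/* Insert at the head of the bucket, as the patched ufs_ihashins() does. */
static void
toy_ihashins(struct toy_inode *ip)
{
	SLIST_INSERT_HEAD(TOYHASH(ip->i_number), ip, i_hash);
}

/* Walk the bucket with SLIST_FOREACH, as the patched ufs_ihashlookup() does. */
static struct toy_inode *
toy_ihashlookup(unsigned long inum)
{
	struct toy_inode *ip;

	SLIST_FOREACH(ip, TOYHASH(inum), i_hash) {
		if (ip->i_number == inum)
			break;
	}
	return (ip);		/* NULL if the bucket had no match */
}

/*
 * SLIST_REMOVE() handles both the head and the interior case, i.e. it is
 * the one-macro equivalent of the removal loop in the patched ufs_ihashrem().
 */
static void
toy_ihashrem(struct toy_inode *ip)
{
	SLIST_REMOVE(TOYHASH(ip->i_number), ip, toy_inode, i_hash);
}

int
main(void)
{
	struct toy_inode nodes[4];
	int i;

	for (i = 0; i < NHASH; i++)
		SLIST_INIT(&toyhashtbl[i]);
	for (i = 0; i < 4; i++) {
		nodes[i].i_number = i + 1;
		toy_ihashins(&nodes[i]);
	}
	toy_ihashrem(&nodes[2]);	/* drop inode number 3 */

	for (i = 1; i <= 4; i++) {
		printf("inode %d: %s\n", i,
		    toy_ihashlookup(i) != NULL ? "found" : "not found");
	}
	return (0);
}

Running the demo should report "found" for inodes 1, 2 and 4 and "not found"
for inode 3.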
Joerg Sonnenberger <joerg@xxxxxxxxxxxxxxxxx> writes:

>> snipped <<
-- 
"Nature wants us to react, return blow for blow, cheating for cheating, lie for
lie, and then it requires a Divine power not to hit-back, keep control and 
remain unattached, and act with prudence." 

