+ if (ncp == (struct nch *)nhp) {
+ nchstats.ncs_miss++;
+ ncp = NULL;
+ } else {
+ if (ncp->nc_id != ncp->nc_ip->i_id) {
+ nchstats.ncs_falsehits++;
+ } else if (!makeentry) {
+ nchstats.ncs_badhits++;
+ } else {
+
+ /*
+ * move this slot to end of LRU
+ * chain, if not already there
+ */
+ if (ncp->nc_nxt) {
+ /* remove from LRU chain */
+ *ncp->nc_prev = ncp->nc_nxt;
+ ncp->nc_nxt->nc_prev = ncp->nc_prev;
+
+ /* and replace at end of it */
+ ncp->nc_nxt = NULL;
+ ncp->nc_prev = nchtail;
+ *nchtail = ncp;
+ nchtail = &ncp->nc_nxt;
+ }
+
+ /*
+ * Get the next inode in the path.
+ * See comment above other `IUNLOCK' code for
+ * an explanation of the locking protocol.
+ */
+ pdp = dp;
+ if (!isdotdot || dp != u.u_rdir)
+ dp = ncp->nc_ip;
+ if (dp == NULL)
+ panic("nami: null cache ino");
+ if (pdp == dp) {
+ dp->i_count++;
+ } else if (isdotdot) {
+ IUNLOCK(pdp);
+ igrab(dp);
+ } else {
+ igrab(dp);
+ IUNLOCK(pdp);
+ }
+
+ /*
+ * Verify that the inode that we got
+ * did not change while we were waiting
+ * for it to be locked.
+ */
+ if (ncp->nc_id != ncp->nc_ip->i_id) {
+ iput(dp);
+ ILOCK(pdp);
+ dp = pdp;
+ nchstats.ncs_falsehits++;
+ } else {
+ ndp->ni_dent.d_ino = dp->i_number;
+ /* ni_dent.d_reclen is garbage ... */
+ nchstats.ncs_goodhits++;
+ goto haveino;
+ }
+ }
+
+ /*
+ * Last component and we are renaming or deleting,
+ * the cache entry is invalid, or otherwise don't
+ * want cache entry to exist.
+ */
+
+ /* remove from LRU chain */
+ *ncp->nc_prev = ncp->nc_nxt;
+ if (ncp->nc_nxt)
+ ncp->nc_nxt->nc_prev = ncp->nc_prev;
+ else
+ nchtail = ncp->nc_prev;
+
+ /* remove from hash chain */
+ remque(ncp);
+
+ /* insert at head of LRU list (first to grab) */
+ ncp->nc_nxt = nchhead;
+ ncp->nc_prev = &nchhead;
+ nchhead->nc_prev = &ncp->nc_nxt;
+ nchhead = ncp;
+
+ /* and make a dummy hash chain */
+ ncp->nc_forw = ncp;
+ ncp->nc_back = ncp;
+
+ ncp = NULL;
+ }
+ }
+
+ /*
+ * Suppress search for slots unless creating
+ * file and at end of pathname, in which case
+ * we watch for a place to put the new file in
+ * case it doesn't already exist.
+ */
+ slotstatus = FOUND;
+ if (flag == CREATE && *cp == 0) {
+ slotstatus = NONE;
+ slotfreespace = 0;
+ slotneeded = DIRSIZ(&ndp->ni_dent);
+ }
+ /*
+ * If this is the same directory that this process
+ * previously searched, pick up where we last left off.
+ * We cache only lookups as these are the most common
+ * and have the greatest payoff. Caching CREATE has little
+ * benefit as it usually must search the entire directory
+ * to determine that the entry does not exist. Caching the
+ * location of the last DELETE has not reduced profiling time
+ * and hence has been removed in the interest of simplicity.
+ */
+ if (flag != LOOKUP || dp->i_number != u.u_ncache.nc_inumber ||
+ dp->i_dev != u.u_ncache.nc_dev) {
+ ndp->ni_offset = 0;
+ numdirpasses = 1;
+ } else {
+ if ((dp->i_flag & ICHG) || dp->i_ctime >= u.u_ncache.nc_time) {
+ if (u.u_ncache.nc_prevoffset > dp->i_size)
+ u.u_ncache.nc_prevoffset = 0;
+ else
+ u.u_ncache.nc_prevoffset &= ~(DIRBLKSIZ - 1);
+ u.u_ncache.nc_time = time.tv_sec;
+ }
+ ndp->ni_offset = u.u_ncache.nc_prevoffset;
+ entryoffsetinblock = blkoff(fs, ndp->ni_offset);
+ if (entryoffsetinblock != 0) {
+ bp = blkatoff(dp, ndp->ni_offset, (char **)0);
+ if (bp == 0)
+ goto bad;
+ }
+ numdirpasses = 2;
+ nchstats.ncs_2passes++;
+ }
+ endsearch = roundup(dp->i_size, DIRBLKSIZ);
+ enduseful = 0;
+
+searchloop:
+ while (ndp->ni_offset < endsearch) {
+ /*
+ * If offset is on a block boundary,
+ * read the next directory block.
+ * Release previous if it exists.
+ */
+ if (blkoff(fs, ndp->ni_offset) == 0) {
+ if (bp != NULL)
+ brelse(bp);
+ bp = blkatoff(dp, ndp->ni_offset, (char **)0);
+ if (bp == 0)
+ goto bad;
+ entryoffsetinblock = 0;
+ }
+
+ /*
+ * If still looking for a slot, and at a DIRBLKSIZE
+ * boundary, have to start looking for free space again.
+ */
+ if (slotstatus == NONE &&
+ (entryoffsetinblock&(DIRBLKSIZ-1)) == 0) {
+ slotoffset = -1;
+ slotfreespace = 0;
+ }
+
+ /*
+ * Get pointer to next entry.
+ * Full validation checks are slow, so we only check
+ * enough to ensure forward progress through the
+ * directory. Complete checks can be run by patching
+ * "dirchk" to be true.
+ */
+ ep = (struct direct *)(bp->b_un.b_addr + entryoffsetinblock);
+ if (ep->d_reclen <= 0 ||
+ dirchk && dirbadentry(ep, entryoffsetinblock)) {
+ dirbad(dp, ndp->ni_offset, "mangled entry");
+ i = DIRBLKSIZ - (entryoffsetinblock & (DIRBLKSIZ - 1));
+ ndp->ni_offset += i;
+ entryoffsetinblock += i;
+ continue;
+ }