convert VOP_UNLOCK and vrele into vput's; add proc parameter to union_dircache
[unix-history] usr/src/sys/kern/kern_lock.c
index 65b0b83..0b3fe9e 100644
@@ -8,18 +8,25 @@
  *
  * %sccs.include.redist.c%
  *
- *     @(#)kern_lock.c 8.6 (Berkeley) %G%
+ *     @(#)kern_lock.c 8.13 (Berkeley) %G%
  */
 
 #include <sys/param.h>
 #include <sys/proc.h>
 #include <sys/lock.h>
+#include <machine/cpu.h>
 
 /*
  * Locking primitives implementation.
  * Locks provide shared/exclusive synchronization.
  */

+#ifdef DEBUG
+#define COUNT(p, x) if (p) (p)->p_locks += (x)
+#else
+#define COUNT(p, x)
+#endif
+
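The COUNT() macro above maintains a per-process tally of held locks in p->p_locks when the kernel is compiled with DEBUG. A minimal sketch of the kind of sanity check this enables (the placement is assumed, not part of this diff): every acquire is paired with a release, so a nonzero count when a process heads back to user mode points at a leaked lock.

	/*
	 * Illustrative sketch only: with DEBUG defined, balanced
	 * lock/unlock pairs leave p->p_locks at zero, so a nonzero
	 * count on return to user mode indicates a leaked lock.
	 */
#ifdef DEBUG
	if (p->p_locks != 0)
		panic("pid %d returning to user with %d locks held",
		    p->p_pid, p->p_locks);
#endif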
 #if NCPUS > 1
 
 /*
@@ -33,11 +40,11 @@ int lock_wait_time = 100;
                if (lock_wait_time > 0) {                               \
                        int i;                                          \
                                                                        \
-                       atomic_unlock(&lkp->lk_interlock);              \
+                       simple_unlock(&lkp->lk_interlock);              \
                        for (i = lock_wait_time; i > 0; i--)            \
                                if (!(wanted))                          \
                                        break;                          \
-                       atomic_lock(&lkp->lk_interlock);                \
+                       simple_lock(&lkp->lk_interlock);                \
                }                                                       \
                if (!(wanted))                                          \
                        break;
@@ -46,7 +53,7 @@ int lock_wait_time = 100;
 
 /*
  * It is an error to spin on a uniprocessor as nothing will ever cause
- * the atomic lock to clear while we are executing.
+ * the simple lock to clear while we are executing.
  */
 #define PAUSE(lkp, wanted)
 
@@ -59,10 +66,10 @@ int lock_wait_time = 100;
        PAUSE(lkp, wanted);                                             \
        for (error = 0; wanted; ) {                                     \
                (lkp)->lk_waitcount++;                                  \
-               atomic_unlock(&(lkp)->lk_interlock);                    \
+               simple_unlock(&(lkp)->lk_interlock);                    \
                error = tsleep((void *)lkp, (lkp)->lk_prio,             \
                    (lkp)->lk_wmesg, (lkp)->lk_timo);                   \
-               atomic_lock(&(lkp)->lk_interlock);                      \
+               simple_lock(&(lkp)->lk_interlock);                      \
                (lkp)->lk_waitcount--;                                  \
                if (error)                                              \
                        break;                                          \
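The ACQUIRE() macro shown here follows the standard sleep-lock protocol: the simple lock guarding the lock's fields is dropped before tsleep() and re-taken afterwards, and the wanted condition is re-tested on every pass. A standalone sketch of the same unlock/sleep/relock pattern, with the sc names hypothetical:

	simple_lock(&sc->sc_interlock);
	while (sc->sc_busy) {		/* re-test after every wakeup */
		/* never tsleep while holding a simple lock */
		simple_unlock(&sc->sc_interlock);
		(void) tsleep((void *)sc, PRIBIO, "scbusy", 0);
		simple_lock(&sc->sc_interlock);
	}
	sc->sc_busy = 1;
	simple_unlock(&sc->sc_interlock);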
@@ -76,15 +83,16 @@ int lock_wait_time = 100;
  * Initialize a lock; required before use.
  */
 void
-lock_init(lkp, prio, wmesg, timo, flags)
+lockinit(lkp, prio, wmesg, timo, flags)
        struct lock *lkp;
        int prio;
        char *wmesg;
        int timo;
        int flags;
 {
+
        bzero(lkp, sizeof(struct lock));
-       atomic_lock_init(&lkp->lk_interlock);
+       simple_lock_init(&lkp->lk_interlock);
        lkp->lk_flags = flags & LK_EXTFLG_MASK;
        lkp->lk_prio = prio;
        lkp->lk_timo = timo;
@@ -101,12 +109,12 @@ lockstatus(lkp)
 {
        int lock_type = 0;
 
-       atomic_lock(&lkp->lk_interlock);
+       simple_lock(&lkp->lk_interlock);
        if (lkp->lk_exclusivecount != 0)
                lock_type = LK_EXCLUSIVE;
        else if (lkp->lk_sharecount != 0)
                lock_type = LK_SHARED;
-       atomic_unlock(&lkp->lk_interlock);
+       simple_unlock(&lkp->lk_interlock);
        return (lock_type);
 }
 
@@ -118,21 +126,46 @@ lockstatus(lkp)
  * accepted shared locks and shared-to-exclusive upgrades to go away.
  */
 int
-lockmgr(lkp, flags, p)
-       volatile struct lock *lkp;
+lockmgr(lkp, flags, interlkp, p)
+       __volatile struct lock *lkp;
        u_int flags;
+       struct simplelock *interlkp;
        struct proc *p;
 {
        int error;
        pid_t pid;
-       volatile int extflags;
+       int extflags;
 
        error = 0;
-       pid = p->p_pid;
-       atomic_lock(&lkp->lk_interlock);
+       if (p)
+               pid = p->p_pid;
+       else
+               pid = LK_KERNPROC;
+       simple_lock(&lkp->lk_interlock);
+       if (flags & LK_INTERLOCK)
+               simple_unlock(interlkp);
        extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
-       if (lkp->lk_flags & LK_DRAINED)
-               panic("lockmgr: using decommissioned lock");
+#ifdef DIAGNOSTIC
+       /*
+        * Once a lock has drained, the LK_DRAINING flag is set and an
+        * exclusive lock is returned. The only valid operation thereafter
+        * is a single release of that exclusive lock. This final release
+        * clears the LK_DRAINING flag and sets the LK_DRAINED flag. Any
+        * further requests of any sort will result in a panic. The bits
+        * selected for these two flags are chosen so that they will be set
+        * in memory that is freed (freed memory is filled with 0xdeadbeef).
+        */
+       if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
+               if (lkp->lk_flags & LK_DRAINED)
+                       panic("lockmgr: using decommissioned lock");
+               if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
+                   lkp->lk_lockholder != pid)
+                       panic("lockmgr: non-release on draining lock: %d\n",
+                           flags & LK_TYPE_MASK);
+               lkp->lk_flags &= ~LK_DRAINING;
+               lkp->lk_flags |= LK_DRAINED;
+       }
+#endif /* DIAGNOSTIC */
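The new interlkp argument closes a classic race: a caller can inspect state under its own simple lock and pass that lock in with LK_INTERLOCK, and lockmgr() releases it only after lk_interlock is held, so no other thread can slip in between the check and the lock request. A hypothetical caller sketch (the vnode field names are assumed, not taken from this diff):

	simple_lock(&vp->v_interlock);
	if (vp->v_flag & VXLOCK) {	/* state examined under the interlock */
		simple_unlock(&vp->v_interlock);
		return (ENOENT);
	}
	/* lockmgr() drops v_interlock for us because of LK_INTERLOCK */
	error = lockmgr(&vp->v_lock, LK_EXCLUSIVE | LK_INTERLOCK,
	    &vp->v_interlock, p);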
 
        switch (flags & LK_TYPE_MASK) {

@@ -154,6 +187,7 @@ lockmgr(lkp, flags, p)
                        if (error)
                                break;
                        lkp->lk_sharecount++;
+                       COUNT(p, 1);
                        break;
                }
                /*
@@ -161,6 +195,7 @@ lockmgr(lkp, flags, p)
                 * An alternative would be to fail with EDEADLK.
                 */
                lkp->lk_sharecount++;
+               COUNT(p, 1);
                /* fall into downgrade */
 
        case LK_DOWNGRADE:
@@ -182,6 +217,7 @@ lockmgr(lkp, flags, p)
                 */
                if (lkp->lk_flags & LK_WANT_UPGRADE) {
                        lkp->lk_sharecount--;
+                       COUNT(p, -1);
                        error = EBUSY;
                        break;
                }
@@ -199,6 +235,7 @@ lockmgr(lkp, flags, p)
                if (lkp->lk_lockholder == pid || lkp->lk_sharecount <= 0)
                        panic("lockmgr: upgrade exclusive lock");
                lkp->lk_sharecount--;
+               COUNT(p, -1);
                /*
                 * If we are just polling, check to see if we will block.
                 */
@@ -224,6 +261,7 @@ lockmgr(lkp, flags, p)
                        if (lkp->lk_exclusivecount != 0)
                                panic("lockmgr: non-zero exclusive count");
                        lkp->lk_exclusivecount = 1;
+                       COUNT(p, 1);
                        break;
                }
                /*
@@ -236,13 +274,14 @@ lockmgr(lkp, flags, p)
                /* fall into exclusive request */
 
        case LK_EXCLUSIVE:
-               if (lkp->lk_lockholder == pid) {
+               if (lkp->lk_lockholder == pid && pid != LK_KERNPROC) {
                        /*
                         *      Recursive lock.
                         */
                        if ((extflags & LK_CANRECURSE) == 0)
                                panic("lockmgr: locking against myself");
                        lkp->lk_exclusivecount++;
+                       COUNT(p, 1);
                        break;
                }
                /*
@@ -275,6 +314,7 @@ lockmgr(lkp, flags, p)
                if (lkp->lk_exclusivecount != 0)
                        panic("lockmgr: non-zero exclusive count");
                lkp->lk_exclusivecount = 1;
+               COUNT(p, 1);
                break;
 
        case LK_RELEASE:
@@ -284,17 +324,28 @@ lockmgr(lkp, flags, p)
                                    pid, "exclusive lock holder",
                                    lkp->lk_lockholder);
                        lkp->lk_exclusivecount--;
                                    pid, "exclusive lock holder",
                                    lkp->lk_lockholder);
                        lkp->lk_exclusivecount--;
+                       COUNT(p, -1);
                        if (lkp->lk_exclusivecount == 0) {
                                lkp->lk_flags &= ~LK_HAVE_EXCL;
                                lkp->lk_lockholder = LK_NOPROC;
                        }
-               } else if (lkp->lk_sharecount != 0)
+               } else if (lkp->lk_sharecount != 0) {
                        lkp->lk_sharecount--;
+                       COUNT(p, -1);
+               }
                if (lkp->lk_waitcount)
                        wakeup((void *)lkp);
                break;
 
        case LK_DRAIN:
+               /*
+                * Check that we do not already hold the lock, as it can 
+                * never drain if we do. Unfortunately, we have no way to
+                * check for holding a shared lock, but at least we can
+                * check for an exclusive one.
+                */
+               if (lkp->lk_lockholder == pid)
+                       panic("lockmgr: draining against myself");
                /*
                 * If we are just polling, check to see if we will sleep.
                 */
@@ -311,19 +362,22 @@ lockmgr(lkp, flags, p)
                     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
                     lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0); ) {
                        lkp->lk_flags |= LK_WAITDRAIN;
-                       atomic_unlock(&lkp->lk_interlock);
+                       simple_unlock(&lkp->lk_interlock);
                        if (error = tsleep((void *)&lkp->lk_flags, lkp->lk_prio,
                            lkp->lk_wmesg, lkp->lk_timo))
                                return (error);
                        if ((extflags) & LK_SLEEPFAIL)
                                return (ENOLCK);
-                       atomic_lock(&lkp->lk_interlock);
+                       simple_lock(&lkp->lk_interlock);
                }
-               lkp->lk_flags |= LK_DRAINED;
+               lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
+               lkp->lk_lockholder = pid;
+               lkp->lk_exclusivecount = 1;
+               COUNT(p, 1);
                break;
 
        default:
-               atomic_unlock(&lkp->lk_interlock);
+               simple_unlock(&lkp->lk_interlock);
                panic("lockmgr: unknown locktype request %d",
                    flags & LK_TYPE_MASK);
                /* NOTREACHED */
                panic("lockmgr: unknown locktype request %d",
                    flags & LK_TYPE_MASK);
                /* NOTREACHED */
@@ -334,6 +388,117 @@ lockmgr(lkp, flags, p)
                lkp->lk_flags &= ~LK_WAITDRAIN;
                wakeup((void *)&lkp->lk_flags);
        }
-       atomic_unlock(&lkp->lk_interlock);
+       simple_unlock(&lkp->lk_interlock);
        return (error);
 }
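With these changes LK_DRAIN returns holding an exclusive lock marked LK_DRAINING, whose single permitted release sets LK_DRAINED, so its natural use is immediately before freeing the structure that embeds the lock. A sketch of that teardown sequence (the np structure and M_TEMP choice are hypothetical):

	(void) lockmgr(&np->n_lock, LK_DRAIN, (struct simplelock *)0, p);
	/* ... unhook np from any global lists while exclusively held ... */
	(void) lockmgr(&np->n_lock, LK_RELEASE, (struct simplelock *)0, p);
	/*
	 * The 0xdeadbeef fill of freed memory leaves LK_DRAINED set,
	 * so a later lockmgr() call on this memory panics.
	 */
	free(np, M_TEMP);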
+
+/*
+ * Print out information about the state of a lock. Used by VOP_PRINT
+ * routines to display the status of contained locks.
+ */
+lockmgr_printinfo(lkp)
+       struct lock *lkp;
+{
+
+       if (lkp->lk_sharecount)
+               printf(" lock type %s: SHARED", lkp->lk_wmesg);
+       else if (lkp->lk_flags & LK_HAVE_EXCL)
+               printf(" lock type %s: EXCL by pid %d", lkp->lk_wmesg,
+                   lkp->lk_lockholder);
+       if (lkp->lk_waitcount > 0)
+               printf(" with %d pending", lkp->lk_waitcount);
+}
+
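lockmgr_printinfo() is meant to be called from a file system's VOP_PRINT routine. A hypothetical caller, with all foofs names purely illustrative:

	int
	foofs_print(ap)
		struct vop_print_args *ap;	/* { struct vnode *a_vp; } */
	{
		struct foonode *np = VTOFOO(ap->a_vp);

		printf("tag VT_FOOFS");
		lockmgr_printinfo(&np->n_lock);
		printf("\n");
		return (0);
	}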
+#if defined(DEBUG) && NCPUS == 1
+#include <sys/kernel.h>
+#include <vm/vm.h>
+#include <sys/sysctl.h>
+int lockpausetime = 1;
+struct ctldebug debug2 = { "lockpausetime", &lockpausetime };
+/*
+ * Simple lock functions so that the debugger can see from whence
+ * they are being called.
+ */
+void
+simple_lock_init(alp)
+       struct simplelock *alp;
+{
+
+       alp->lock_data = 0;
+}
+
+void
+_simple_lock(alp, id, l)
+       __volatile struct simplelock *alp;
+       const char *id;
+       int l;
+{
+
+       if (alp->lock_data == 1) {
+               if (lockpausetime == -1)
+                       panic("%s:%d: simple_lock: lock held", id, l);
+               if (lockpausetime == 0) {
+                       printf("%s:%d: simple_lock: lock held\n", id, l);
+                       BACKTRACE(curproc);
+               } else if (lockpausetime > 0) {
+                       printf("%s:%d: simple_lock: lock held...", id, l);
+                       tsleep(&lockpausetime, PCATCH | PPAUSE, "slock",
+                           lockpausetime * hz);
+                       printf(" continuing\n");
+               }
+       }
+       alp->lock_data = 1;
+}
+
+int
+_simple_lock_try(alp, id, l)
+       __volatile struct simplelock *alp;
+       const char *id;
+       int l;
+{
+
+       /*
+       if (alp->lock_data == 1) {
+               if (lockpausetime == -1)
+                       panic("%s:%d: simple_lock_try: lock held", id, l);
+               if (lockpausetime == 0) {
+                       printf("%s:%d: simple_lock_try: lock held\n", id, l);
+                       BACKTRACE(curproc);
+               } else if (lockpausetime > 0) {
+                       printf("%s:%d: simple_lock_try: lock held...", id, l);
+                       tsleep(&lockpausetime, PCATCH | PPAUSE, "slock",
+                           lockpausetime * hz);
+                       printf(" continuing\n");
+               }
+       }
+       */
+       if (alp->lock_data)
+               return (0);
+
+       alp->lock_data = 1;
+       return (1);
+}
+
+void
+_simple_unlock(alp, id, l)
+       __volatile struct simplelock *alp;
+       const char *id;
+       int l;
+{
+
+       if (alp->lock_data == 0) {
+               if (lockpausetime == -1)
+                       panic("%s:%d: simple_unlock: lock not held", id, l);
+               if (lockpausetime == 0) {
+                       printf("%s:%d: simple_unlock: lock not held\n", id, l);
+                       BACKTRACE(curproc);
+               } else if (lockpausetime > 0) {
+                       printf("%s:%d: simple_unlock: lock not held...", id, l);
+                       tsleep(&lockpausetime, PCATCH | PPAUSE, "sunlock",
+                           lockpausetime * hz);
+                       printf(" continuing\n");
+               }
+       }
+       alp->lock_data = 0;
+}
+#endif /* DEBUG && NCPUS == 1 */
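The debugging versions take an id (source file) and l (line number) so their diagnostics can name the call site. Presumably the public names are wrapper macros in <sys/lock.h> along these lines (an assumed sketch, not shown in this diff):

	#define simple_lock(alp)	_simple_lock((alp), __FILE__, __LINE__)
	#define simple_lock_try(alp)	_simple_lock_try((alp), __FILE__, __LINE__)
	#define simple_unlock(alp)	_simple_unlock((alp), __FILE__, __LINE__)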