convert VOP_UNLOCK and vrele into vput's; add proc parameter to union_dircache
diff --git a/usr/src/sys/kern/kern_lock.c b/usr/src/sys/kern/kern_lock.c
index 2478046..0b3fe9e 100644
--- a/usr/src/sys/kern/kern_lock.c
+++ b/usr/src/sys/kern/kern_lock.c
@@ -8,18 +8,25 @@
  *
  * %sccs.include.redist.c%
  *
- *     @(#)kern_lock.c 8.5 (Berkeley) %G%
+ *     @(#)kern_lock.c 8.13 (Berkeley) %G%
  */
 
 #include <sys/param.h>
 #include <sys/proc.h>
 #include <sys/lock.h>
+#include <machine/cpu.h>
 
 /*
  * Locking primitives implementation.
  * Locks provide shared/exclusive synchronization.
  */
 
+#ifdef DEBUG
+#define COUNT(p, x) if (p) (p)->p_locks += (x)
+#else
+#define COUNT(p, x)
+#endif
+
 #if NCPUS > 1
 
 /*
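The COUNT() macro added above drives the per-process lock accounting that later hunks pair with every acquire (COUNT(p, 1)) and release (COUNT(p, -1)). A minimal userland model of the bookkeeping, compiled with -DDEBUG; struct proc is reduced to its p_locks field and the main() harness is illustrative, not part of the kernel source:

    #include <stdio.h>

    struct proc { int p_locks; };   /* stand-in for the kernel's struct proc */

    #ifdef DEBUG
    #define COUNT(p, x)     if (p) (p)->p_locks += (x)
    #else
    #define COUNT(p, x)
    #endif

    int
    main(void)
    {
            struct proc p0 = { 0 };
            struct proc *p = &p0;

            COUNT(p, 1);                    /* shared or exclusive acquire */
            COUNT(p, 1);                    /* recursive acquire */
            COUNT(p, -1);                   /* release */
            printf("p_locks = %d\n", p0.p_locks);   /* prints "p_locks = 1" */
            COUNT((struct proc *)0, 1);     /* null proc: bookkeeping skipped */
            return (0);
    }

The null-pointer guard matters because lockmgr() may now be called with p == NULL for locks taken on behalf of the kernel itself.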
@@ -33,11 +40,11 @@ int lock_wait_time = 100;
                if (lock_wait_time > 0) {                               \
                        int i;                                          \
                                                                        \
-                       atomic_unlock(&lkp->lk_interlock);              \
+                       simple_unlock(&lkp->lk_interlock);              \
                        for (i = lock_wait_time; i > 0; i--)            \
                                if (!(wanted))                          \
                                        break;                          \
-                       atomic_lock(&lkp->lk_interlock);                \
+                       simple_lock(&lkp->lk_interlock);                \
                }                                                       \
                if (!(wanted))                                          \
                        break;
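lock_wait_time governs an optimistic spin: on a multiprocessor the holder may be running on another CPU, so PAUSE drops the interlock and polls the wanted condition briefly before the caller commits to a real sleep. A hedged model of just that decision (all names here are illustrative):

    /* Spin briefly; report whether the caller still needs to sleep. */
    int
    model_pause(volatile int *wanted, int spins)
    {
            int i;

            for (i = spins; i > 0; i--)
                    if (!*wanted)
                            return (0);     /* cleared while spinning */
            return (1);                     /* still held: go to sleep */
    }

On a uniprocessor the spin is compiled away entirely; as the next hunk's comment says, nothing can clear the lock while we occupy the only CPU.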
@@ -46,7 +53,7 @@ int lock_wait_time = 100;
 
 /*
  * It is an error to spin on a uniprocessor as nothing will ever cause
- * the atomic lock to clear while we are executing.
+ * the simple lock to clear while we are executing.
  */
 #define PAUSE(lkp, wanted)
 
@@ -58,11 +65,12 @@ int lock_wait_time = 100;
 #define ACQUIRE(lkp, error, extflags, wanted)                          \
        PAUSE(lkp, wanted);                                             \
        for (error = 0; wanted; ) {                                     \
-               (lkp)->lk_flags |= LK_WAITING;                          \
-               atomic_unlock(&(lkp)->lk_interlock);                    \
+               (lkp)->lk_waitcount++;                                  \
+               simple_unlock(&(lkp)->lk_interlock);                    \
                error = tsleep((void *)lkp, (lkp)->lk_prio,             \
                    (lkp)->lk_wmesg, (lkp)->lk_timo);                   \
-               atomic_lock(&(lkp)->lk_interlock);                      \
+               simple_lock(&(lkp)->lk_interlock);                      \
+               (lkp)->lk_waitcount--;                                  \
                if (error)                                              \
                        break;                                          \
                if ((extflags) & LK_SLEEPFAIL) {                        \
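ACQUIRE now counts sleepers in lk_waitcount instead of setting the one-shot LK_WAITING flag, so the release side can tell whether anyone is still waiting even after an earlier wakeup consumed the flag. A pthread model of the same handshake (the condition-variable mapping of tsleep/wakeup and all names are illustrative; initialize the fields with PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, and zeros):

    #include <pthread.h>

    struct model_lock {
            pthread_mutex_t interlock;      /* plays the role of lk_interlock */
            pthread_cond_t  sleepq;         /* plays the role of tsleep/wakeup */
            int             wanted;         /* plays the role of the LK_* busy bits */
            int             waitcount;      /* plays the role of lk_waitcount */
    };

    void
    model_acquire(struct model_lock *lkp)
    {

            pthread_mutex_lock(&lkp->interlock);
            while (lkp->wanted) {
                    lkp->waitcount++;       /* announce before sleeping */
                    pthread_cond_wait(&lkp->sleepq, &lkp->interlock);
                    lkp->waitcount--;
            }
            lkp->wanted = 1;
            pthread_mutex_unlock(&lkp->interlock);
    }

    void
    model_release(struct model_lock *lkp)
    {

            pthread_mutex_lock(&lkp->interlock);
            lkp->wanted = 0;
            if (lkp->waitcount)             /* wake only when someone waits */
                    pthread_cond_broadcast(&lkp->sleepq);
            pthread_mutex_unlock(&lkp->interlock);
    }

A counter cannot strand later sleepers the way a single LK_WAITING bit could when several processes were queued behind one wakeup.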
@@ -75,15 +83,16 @@ int lock_wait_time = 100;
  * Initialize a lock; required before use.
  */
 void
-lock_init(lkp, prio, wmesg, timo, flags)
+lockinit(lkp, prio, wmesg, timo, flags)
        struct lock *lkp;
        int prio;
        char *wmesg;
        int timo;
        int flags;
 {
+
        bzero(lkp, sizeof(struct lock));
-       atomic_lock_init(&lkp->lk_interlock);
+       simple_lock_init(&lkp->lk_interlock);
        lkp->lk_flags = flags & LK_EXTFLG_MASK;
        lkp->lk_prio = prio;
        lkp->lk_timo = timo;
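With the rename, callers initialize locks through lockinit() before first use. A minimal usage sketch under the new interface (kernel context assumed; the PINOD priority, zero timeout, and curproc caller are illustrative choices):

    struct lock lk;

    /* Once, before first use; "mylock" is the tsleep wait message. */
    lockinit(&lk, PINOD, "mylock", 0, 0);

    /* Exclusive acquire and release in the new argument order. */
    if (lockmgr(&lk, LK_EXCLUSIVE, (struct simplelock *)0, curproc) == 0)
            (void) lockmgr(&lk, LK_RELEASE, (struct simplelock *)0, curproc);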
@@ -100,12 +109,12 @@ lockstatus(lkp)
 {
        int lock_type = 0;
 
-       atomic_lock(&lkp->lk_interlock);
+       simple_lock(&lkp->lk_interlock);
        if (lkp->lk_exclusivecount != 0)
                lock_type = LK_EXCLUSIVE;
        else if (lkp->lk_sharecount != 0)
                lock_type = LK_SHARED;
-       atomic_unlock(&lkp->lk_interlock);
+       simple_unlock(&lkp->lk_interlock);
        return (lock_type);
 }
 
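lockstatus() is unchanged apart from the renamed interlock primitives; it still classifies a lock as exclusively held, share-held, or free without identifying the holder. An illustrative caller:

    switch (lockstatus(&lk)) {
    case LK_EXCLUSIVE:
            printf("exclusively locked\n");
            break;
    case LK_SHARED:
            printf("share locked\n");
            break;
    default:
            printf("unlocked\n");
            break;
    }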
@@ -117,18 +126,46 @@ lockstatus(lkp)
  * accepted shared locks and shared-to-exclusive upgrades to go away.
  */
 int
-lockmgr(lkp, p, flags)
-       volatile struct lock *lkp;
-       struct proc *p;
+lockmgr(lkp, flags, interlkp, p)
+       __volatile struct lock *lkp;
        u_int flags;
+       struct simplelock *interlkp;
+       struct proc *p;
 {
        int error;
        pid_t pid;
-       volatile int extflags;
+       int extflags;
 
-       pid = p->p_pid;
-       atomic_lock(&lkp->lk_interlock);
+       error = 0;
+       if (p)
+               pid = p->p_pid;
+       else
+               pid = LK_KERNPROC;
+       simple_lock(&lkp->lk_interlock);
+       if (flags & LK_INTERLOCK)
+               simple_unlock(interlkp);
        extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
+#ifdef DIAGNOSTIC
+       /*
+        * Once a lock has drained, the LK_DRAINING flag is set and an
+        * exclusive lock is returned. The only valid operation thereafter
+        * is a single release of that exclusive lock. This final release
+        * clears the LK_DRAINING flag and sets the LK_DRAINED flag. Any
+        * further requests of any sort will result in a panic. The bits
+        * selected for these two flags are chosen so that they will be set
+        * in memory that is freed (freed memory is filled with 0xdeadbeef).
+        */
+       if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
+               if (lkp->lk_flags & LK_DRAINED)
+                       panic("lockmgr: using decommissioned lock");
+               if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
+                   lkp->lk_lockholder != pid)
+                       panic("lockmgr: non-release on draining lock: %d\n",
+                           flags & LK_TYPE_MASK);
+               lkp->lk_flags &= ~LK_DRAINING;
+               lkp->lk_flags |= LK_DRAINED;
+       }
+#endif /* DIAGNOSTIC */
 
        switch (flags & LK_TYPE_MASK) {
 
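The reworked entry point accepts an optional caller-held simple lock and, when LK_INTERLOCK is set, releases it only after lk_interlock has been taken, so no state change can slip in between the two locks. Passing a null proc makes LK_KERNPROC the owner, for locks held by the kernel rather than a process. A hedged sketch of the hand-off pattern (somenode and its fields are illustrative; only the LK_INTERLOCK semantics come from this change):

    struct somenode *np;
    int error;

    simple_lock(&np->n_interlock);
    /*
     * Examine state guarded by n_interlock here; lockmgr() drops
     * n_interlock for us once its own interlock is held, so there
     * is no unprotected window between the two.
     */
    error = lockmgr(&np->n_lock, LK_EXCLUSIVE | LK_INTERLOCK,
        &np->n_interlock, curproc);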
@@ -139,27 +176,26 @@ lockmgr(lkp, p, flags)
                         */
                        if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
                            (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
-                               atomic_unlock(&lkp->lk_interlock);
-                               return (EBUSY);
+                               error = EBUSY;
+                               break;
                        }
                        /*
                         * Wait for exclusive locks and upgrades to clear.
                         */
                        ACQUIRE(lkp, error, extflags, lkp->lk_flags &
                            (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE));
-                       if (error) {
-                               atomic_unlock(&lkp->lk_interlock);
-                               return (error);
-                       }
+                       if (error)
+                               break;
                        lkp->lk_sharecount++;
-                       atomic_unlock(&lkp->lk_interlock);
-                       return (0);
+                       COUNT(p, 1);
+                       break;
                }
                /*
                 * We hold an exclusive lock, so downgrade it to shared.
                 * An alternative would be to fail with EDEADLK.
                 */
                lkp->lk_sharecount++;
+               COUNT(p, 1);
                /* fall into downgrade */
 
        case LK_DOWNGRADE:
@@ -169,12 +205,9 @@ lockmgr(lkp, p, flags)
                lkp->lk_exclusivecount = 0;
                lkp->lk_flags &= ~LK_HAVE_EXCL;
                lkp->lk_lockholder = LK_NOPROC;
-               if (lkp->lk_flags & LK_WAITING) {
-                       lkp->lk_flags &= ~LK_WAITING;
+               if (lkp->lk_waitcount)
                        wakeup((void *)lkp);
-               }
-               atomic_unlock(&lkp->lk_interlock);
-               return (0);
+               break;
 
        case LK_EXCLUPGRADE:
                /*
@@ -184,8 +217,9 @@ lockmgr(lkp, p, flags)
                 */
                if (lkp->lk_flags & LK_WANT_UPGRADE) {
                        lkp->lk_sharecount--;
-                       atomic_unlock(&lkp->lk_interlock);
-                       return (EBUSY);
+                       COUNT(p, -1);
+                       error = EBUSY;
+                       break;
                }
                /* fall into normal upgrade */
 
@@ -201,14 +235,15 @@ lockmgr(lkp, p, flags)
                if (lkp->lk_lockholder == pid || lkp->lk_sharecount <= 0)
                        panic("lockmgr: upgrade exclusive lock");
                lkp->lk_sharecount--;
+               COUNT(p, -1);
                /*
                 * If we are just polling, check to see if we will block.
                 */
                if ((extflags & LK_NOWAIT) &&
                    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
                     lkp->lk_sharecount > 1)) {
-                       atomic_unlock(&lkp->lk_interlock);
-                       return (EBUSY);
+                       error = EBUSY;
+                       break;
                }
                if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
                        /*
@@ -219,39 +254,35 @@ lockmgr(lkp, p, flags)
                        lkp->lk_flags |= LK_WANT_UPGRADE;
                        ACQUIRE(lkp, error, extflags, lkp->lk_sharecount);
                        lkp->lk_flags &= ~LK_WANT_UPGRADE;
-                       if (error) {
-                               atomic_unlock(&lkp->lk_interlock);
-                               return (error);
-                       }
+                       if (error)
+                               break;
                        lkp->lk_flags |= LK_HAVE_EXCL;
                        lkp->lk_lockholder = pid;
                        if (lkp->lk_exclusivecount != 0)
                                panic("lockmgr: non-zero exclusive count");
                        lkp->lk_exclusivecount = 1;
-                       atomic_unlock(&lkp->lk_interlock);
-                       return (0);
+                       COUNT(p, 1);
+                       break;
                }
                /*
                 * Someone else has requested upgrade. Release our shared
                 * lock, awaken upgrade requestor if we are the last shared
                 * lock, then request an exclusive lock.
                 */
-               if (lkp->lk_sharecount == 0 && (lkp->lk_flags & LK_WAITING)) {
-                       lkp->lk_flags &= ~LK_WAITING;
+               if (lkp->lk_sharecount == 0 && lkp->lk_waitcount)
                        wakeup((void *)lkp);
-               }
                /* fall into exclusive request */
 
        case LK_EXCLUSIVE:
-               if (lkp->lk_lockholder == pid) {
+               if (lkp->lk_lockholder == pid && pid != LK_KERNPROC) {
                        /*
                         *      Recursive lock.
                         */
                        if ((extflags & LK_CANRECURSE) == 0)
                                panic("lockmgr: locking against myself");
                        lkp->lk_exclusivecount++;
-                       atomic_unlock(&lkp->lk_interlock);
-                       return (0);
+                       COUNT(p, 1);
+                       break;
                }
                /*
                 * If we are just polling, check to see if we will sleep.
@@ -259,18 +290,16 @@ lockmgr(lkp, p, flags)
                if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
                     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
                     lkp->lk_sharecount != 0)) {
-                       atomic_unlock(&lkp->lk_interlock);
-                       return (EBUSY);
+                       error = EBUSY;
+                       break;
                }
                /*
                 * Try to acquire the want_exclusive flag.
                 */
                ACQUIRE(lkp, error, extflags, lkp->lk_flags &
                    (LK_HAVE_EXCL | LK_WANT_EXCL));
-               if (error) {
-                       atomic_unlock(&lkp->lk_interlock);
-                       return (error);
-               }
+               if (error)
+                       break;
                lkp->lk_flags |= LK_WANT_EXCL;
                /*
                 * Wait for shared locks and upgrades to finish.
@@ -278,38 +307,198 @@ lockmgr(lkp, p, flags)
                ACQUIRE(lkp, error, extflags, lkp->lk_sharecount != 0 ||
                       (lkp->lk_flags & LK_WANT_UPGRADE));
                lkp->lk_flags &= ~LK_WANT_EXCL;
-               if (error) {
-                       atomic_unlock(&lkp->lk_interlock);
-                       return (error);
-               }
+               if (error)
+                       break;
                lkp->lk_flags |= LK_HAVE_EXCL;
                lkp->lk_lockholder = pid;
                if (lkp->lk_exclusivecount != 0)
                        panic("lockmgr: non-zero exclusive count");
                lkp->lk_exclusivecount = 1;
-               atomic_unlock(&lkp->lk_interlock);
-               return (0);
+               COUNT(p, 1);
+               break;
 
        case LK_RELEASE:
                if (lkp->lk_exclusivecount != 0) {
+                       if (pid != lkp->lk_lockholder)
+                               panic("lockmgr: pid %d, not %s %d unlocking",
+                                   pid, "exclusive lock holder",
+                                   lkp->lk_lockholder);
                        lkp->lk_exclusivecount--;
+                       COUNT(p, -1);
                        if (lkp->lk_exclusivecount == 0) {
                                lkp->lk_flags &= ~LK_HAVE_EXCL;
                                lkp->lk_lockholder = LK_NOPROC;
                        }
-               } else if (lkp->lk_sharecount != 0)
+               } else if (lkp->lk_sharecount != 0) {
                        lkp->lk_sharecount--;
-               if (lkp->lk_flags & LK_WAITING) {
-                       lkp->lk_flags &= ~LK_WAITING;
+                       COUNT(p, -1);
+               }
+               if (lkp->lk_waitcount)
                        wakeup((void *)lkp);
+               break;
+
+       case LK_DRAIN:
+               /*
+                * Check that we do not already hold the lock, as it can 
+                * never drain if we do. Unfortunately, we have no way to
+                * check for holding a shared lock, but at least we can
+                * check for an exclusive one.
+                */
+               if (lkp->lk_lockholder == pid)
+                       panic("lockmgr: draining against myself");
+               /*
+                * If we are just polling, check to see if we will sleep.
+                */
+               if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
+                    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
+                    lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0)) {
+                       error = EBUSY;
+                       break;
                }
-               atomic_unlock(&lkp->lk_interlock);
-               return (0);
+               PAUSE(lkp, ((lkp->lk_flags &
+                    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
+                    lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0));
+               for (error = 0; ((lkp->lk_flags &
+                    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
+                    lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0); ) {
+                       lkp->lk_flags |= LK_WAITDRAIN;
+                       simple_unlock(&lkp->lk_interlock);
+                       if (error = tsleep((void *)&lkp->lk_flags, lkp->lk_prio,
+                           lkp->lk_wmesg, lkp->lk_timo))
+                               return (error);
+                       if ((extflags) & LK_SLEEPFAIL)
+                               return (ENOLCK);
+                       simple_lock(&lkp->lk_interlock);
+               }
+               lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
+               lkp->lk_lockholder = pid;
+               lkp->lk_exclusivecount = 1;
+               COUNT(p, 1);
+               break;
 
        default:
-               atomic_unlock(&lkp->lk_interlock);
+               simple_unlock(&lkp->lk_interlock);
                panic("lockmgr: unknown locktype request %d",
                    flags & LK_TYPE_MASK);
                /* NOTREACHED */
        }
+       if ((lkp->lk_flags & LK_WAITDRAIN) && ((lkp->lk_flags &
+            (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) == 0 &&
+            lkp->lk_sharecount == 0 && lkp->lk_waitcount == 0)) {
+               lkp->lk_flags &= ~LK_WAITDRAIN;
+               wakeup((void *)&lkp->lk_flags);
+       }
+       simple_unlock(&lkp->lk_interlock);
+       return (error);
+}
+
+/*
+ * Print out information about the state of a lock. Used by VOP_PRINT
+ * routines to display the status of contained locks.
+ */
+lockmgr_printinfo(lkp)
+       struct lock *lkp;
+{
+
+       if (lkp->lk_sharecount)
+               printf(" lock type %s: SHARED", lkp->lk_wmesg);
+       else if (lkp->lk_flags & LK_HAVE_EXCL)
+               printf(" lock type %s: EXCL by pid %d", lkp->lk_wmesg,
+                   lkp->lk_lockholder);
+       if (lkp->lk_waitcount > 0)
+               printf(" with %d pending", lkp->lk_waitcount);
+}
+
+#if defined(DEBUG) && NCPUS == 1
+#include <sys/kernel.h>
+#include <vm/vm.h>
+#include <sys/sysctl.h>
+int lockpausetime = 1;
+struct ctldebug debug2 = { "lockpausetime", &lockpausetime };
+/*
+ * Simple lock functions so that the debugger can see from whence
+ * they are being called.
+ */
+void
+simple_lock_init(alp)
+       struct simplelock *alp;
+{
+
+       alp->lock_data = 0;
+}
+
+void
+_simple_lock(alp, id, l)
+       __volatile struct simplelock *alp;
+       const char *id;
+       int l;
+{
+
+       if (alp->lock_data == 1) {
+               if (lockpausetime == -1)
+                       panic("%s:%d: simple_lock: lock held", id, l);
+               if (lockpausetime == 0) {
+                       printf("%s:%d: simple_lock: lock held\n", id, l);
+                       BACKTRACE(curproc);
+               } else if (lockpausetime > 0) {
+                       printf("%s:%d: simple_lock: lock held...", id, l);
+                       tsleep(&lockpausetime, PCATCH | PPAUSE, "slock",
+                           lockpausetime * hz);
+                       printf(" continuing\n");
+               }
+       }
+       alp->lock_data = 1;
+}
+
+int
+_simple_lock_try(alp, id, l)
+       __volatile struct simplelock *alp;
+       const char *id;
+       int l;
+{
+
+       /*
+       if (alp->lock_data == 1) {
+               if (lockpausetime == -1)
+                       panic("%s:%d: simple_lock_try: lock held", id, l);
+               if (lockpausetime == 0) {
+                       printf("%s:%d: simple_lock_try: lock held\n", id, l);
+                       BACKTRACE(curproc);
+               } else if (lockpausetime > 0) {
+                       printf("%s:%d: simple_lock_try: lock held...", id, l);
+                       tsleep(&lockpausetime, PCATCH | PPAUSE, "slock",
+                           lockpausetime * hz);
+                       printf(" continuing\n");
+               }
+       }
+       */
+       if (alp->lock_data)
+               return (0);
+
+       alp->lock_data = 1;
+       return (1);
+}
+
+void
+_simple_unlock(alp, id, l)
+       __volatile struct simplelock *alp;
+       const char *id;
+       int l;
+{
+
+       if (alp->lock_data == 0) {
+               if (lockpausetime == -1)
+                       panic("%s:%d: simple_unlock: lock not held", id, l);
+               if (lockpausetime == 0) {
+                       printf("%s:%d: simple_unlock: lock not held\n", id, l);
+                       BACKTRACE(curproc);
+               } else if (lockpausetime > 0) {
+                       printf("%s:%d: simple_unlock: lock not held...", id, l);
+                       tsleep(&lockpausetime, PCATCH | PPAUSE, "sunlock",
+                           lockpausetime * hz);
+                       printf(" continuing\n");
+               }
+       }
+       alp->lock_data = 0;
 }
+#endif /* DEBUG && NCPUS == 1 */
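The LK_DRAIN case and the DIAGNOSTIC checks above imply a teardown discipline: drain out every sharer, upgrader, and sleeper, then issue exactly one release, after which the lock is decommissioned and any further use panics. A sketch of that sequence (the containing object and the M_TEMP allocation type are illustrative):

    /* Returns holding an exclusive lock once all users are gone. */
    (void) lockmgr(&obj->o_lock, LK_DRAIN, (struct simplelock *)0, p);

    /* The single permitted release: LK_DRAINING becomes LK_DRAINED. */
    (void) lockmgr(&obj->o_lock, LK_RELEASE, (struct simplelock *)0, p);

    /*
     * Safe to free: the DRAINING/DRAINED bits were chosen to survive
     * in 0xdeadbeef-filled freed memory, so a stale reference still
     * panics under DIAGNOSTIC.
     */
    free(obj, M_TEMP);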