/*
 * Copyright (c) 1982, 1986, 1989 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the University of
 *        California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)ufs_lockf.c        7.7 (Berkeley) 7/2/91
 *        $Id: ufs_lockf.c,v 1.4 1993/10/20 07:31:39 davidg Exp $
 */

#include "param.h"
#include "systm.h"
#include "kernel.h"
#include "file.h"
#include "proc.h"
#include "vnode.h"
#include "malloc.h"
#include "fcntl.h"

#include "lockf.h"
#include "quota.h"
#include "inode.h"

/*
 * Advisory record locking support
 */
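/*
 * Example of how this is typically driven: the ufs advisory-lock
 * vnode operation passes the address of the inode's lock chain, as in
 *
 *        error = lf_advlock(&ip->i_lockf, ip->i_size, id, op, fl, flags);
 *
 * so the code below manipulates a singly-linked list of lockf
 * structures hanging off the inode, kept sorted by starting offset.
 */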
lf_advlock(head, size, id, op, fl, flags)
        struct lockf **head;
        u_long size;
        caddr_t id;
        int op;
        register struct flock *fl;
        int flags;
{
        register struct lockf *lock;
        off_t start, end;
        int error;

        /*
         * Avoid the common case of unlocking when inode has no locks.
         */
        if (*head == (struct lockf *)0) {
                if (op != F_SETLK) {
                        fl->l_type = F_UNLCK;
                        return (0);
                }
        }

        /*
         * Convert the flock structure into a start and end.
         */
        switch (fl->l_whence) {

        case SEEK_SET:
        case SEEK_CUR:
                /*
                 * Caller is responsible for adding any necessary offset
                 * when SEEK_CUR is used.
                 */
                start = fl->l_start;
                break;

        case SEEK_END:
                start = size + fl->l_start;
                break;

        default:
                return (EINVAL);
        }
        if (start < 0)
                return (EINVAL);
        if (fl->l_len == 0)
                end = -1;
        else
                end = start + fl->l_len - 1;
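        /*
         * For example, a request with l_whence == SEEK_SET,
         * l_start == 100 and l_len == 50 yields the byte range
         * [100, 149].  A zero l_len means "lock to end of file",
         * represented internally by end == -1.
         */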
        /*
         * Create the lockf structure
         */
        MALLOC(lock, struct lockf *, sizeof *lock, M_LOCKF, M_WAITOK);
        lock->lf_start = start;
        lock->lf_end = end;
        lock->lf_id = id;
        lock->lf_head = head;
        lock->lf_type = fl->l_type;
        lock->lf_next = (struct lockf *)0;
        lock->lf_block = (struct lockf *)0;
        lock->lf_flags = flags;
        /*
         * Do the requested operation.
         */
        switch (op) {
        case F_SETLK:
                return (lf_setlock(lock));

        case F_UNLCK:
                error = lf_clearlock(lock);
                FREE(lock, M_LOCKF);
                return (error);

        case F_GETLK:
                error = lf_getlock(lock, fl);
                FREE(lock, M_LOCKF);
                return (error);

        default:
                free(lock, M_LOCKF);
                return (EINVAL);
        }
        /* NOTREACHED */
}
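
/*
 * For reference, a userland request that reaches lf_advlock() might be
 * built like this (F_SETLKW is the blocking variant; it arrives here
 * as op F_SETLK with F_WAIT set in the flags):
 *
 *        struct flock fl;
 *
 *        fl.l_type = F_WRLCK;
 *        fl.l_whence = SEEK_SET;
 *        fl.l_start = 100;
 *        fl.l_len = 50;
 *        fcntl(fd, F_SETLK, &fl);
 */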

/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
int maxlockdepth = MAXDEPTH;

#ifdef LOCKF_DEBUG
int lockf_debug = 0;
#endif /* LOCKF_DEBUG */

#define NOLOCKF        (struct lockf *)0
#define SELF        0x1
#define OTHERS        0x2

/*
 * Set a byte-range lock.
 */
lf_setlock(lock)
        register struct lockf *lock;
{
        register struct lockf *block;
        struct lockf **head = lock->lf_head;
        struct lockf **prev, *overlap, *ltmp;
        static char lockstr[] = "lockf";
        int ovcase, priority, needtolink, error;

#ifdef LOCKF_DEBUG
        if (lockf_debug & 1)
                lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

        /*
         * Set the priority
         */
        priority = PLOCK;
        if (lock->lf_type == F_WRLCK)
                priority += 4;
        priority |= PCATCH;
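        /*
         * Note: writers sleep at a slightly less favorable priority
         * than readers (PLOCK + 4), and PCATCH makes the sleep below
         * interruptible by signals.
         */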
        /*
         * Scan lock list for this file looking for locks that would block us.
         */
        while (block = lf_getblock(lock)) {
                /*
                 * Free the structure and return if nonblocking.
                 */
                if ((lock->lf_flags & F_WAIT) == 0) {
                        FREE(lock, M_LOCKF);
                        return (EAGAIN);
                }
                /*
                 * We are blocked. Since flock style locks cover
                 * the whole file, there is no chance for deadlock.
                 * For byte-range locks we must check for deadlock.
                 *
                 * Deadlock detection is done by looking through the
                 * wait channels to see if there are any cycles that
                 * involve us. MAXDEPTH is set just to make sure we
                 * do not go off into neverland.
                 */
                if ((lock->lf_flags & F_POSIX) &&
                    (block->lf_flags & F_POSIX)) {
                        register struct proc *wproc;
                        register struct lockf *waitblock;
                        int i = 0;

                        /* The block is waiting on something */
                        wproc = (struct proc *)block->lf_id;
                        while (wproc->p_wchan &&
                               (wproc->p_wmesg == lockstr) &&
                               (i++ < maxlockdepth)) {
                                waitblock = (struct lockf *)wproc->p_wchan;
                                /* Get the owner of the blocking lock */
                                waitblock = waitblock->lf_next;
                                if ((waitblock->lf_flags & F_POSIX) == 0)
                                        break;
                                wproc = (struct proc *)waitblock->lf_id;
                                if (wproc == (struct proc *)lock->lf_id) {
                                        free(lock, M_LOCKF);
                                        return (EDEADLK);
                                }
                        }
                }
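                /*
                 * For example: if we (process A) are blocked by B's lock,
                 * and B is itself asleep in lf_setlock (its p_wchan points
                 * at the lockf it sleeps on, p_wmesg is "lockf") waiting
                 * on a lock owned by A, the walk above comes back around
                 * to A and we fail with EDEADLK instead of sleeping
                 * forever.
                 */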
                /*
                 * For flock type locks, we must first remove
                 * any shared locks that we hold before we sleep
                 * waiting for an exclusive lock.
                 */
                if ((lock->lf_flags & F_FLOCK) &&
                    lock->lf_type == F_WRLCK) {
                        lock->lf_type = F_UNLCK;
                        (void) lf_clearlock(lock);
                        lock->lf_type = F_WRLCK;
                }
                /*
                 * Add our lock to the blocked list and sleep until we're free.
                 * Remember who blocked us (for deadlock detection).
                 */
                lock->lf_next = block;
                lf_addblock(block, lock);
#ifdef LOCKF_DEBUG
                if (lockf_debug & 1) {
                        lf_print("lf_setlock: blocking on", block);
                        lf_printlist("lf_setlock", block);
                }
#endif /* LOCKF_DEBUG */
                if (error = tsleep((caddr_t)lock, priority, lockstr, 0)) {
#ifdef PK_LOCKF_FIX        /* Paul Kranenburg's lockf fix (buggy!) */
                        /* Don't leave a dangling pointer in block list */
                        if (lf_getblock(lock) == block) {
                                struct lockf **prev;

                                /* Still there, find us on list */
                                prev = &block->lf_block;
                                while ((block = block->lf_block) != NOLOCKF) {
                                        if (block == lock) {
                                                *prev = block->lf_block;
                                                break;
                                        }
                                        prev = &block->lf_block;
                                }
                        }
                        free(lock, M_LOCKF);
#else        /* Mark Tinguely's fix instead */
                        (void) lf_clearlock(lock);
                        return (error);
#endif
#if 0        /* ...and this is the original code -DLG */
                        free(lock, M_LOCKF);
#endif
                        return (error);
                }
        }
        /*
         * No blocks!!  Add the lock.  Note that we will
         * downgrade or upgrade any overlapping locks this
         * process already owns.
         *
         * Skip over locks owned by other processes.
         * Handle any locks that overlap and are owned by ourselves.
         */
        prev = head;
        block = *head;
        needtolink = 1;
        for (;;) {
                if (ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap))
                        block = overlap->lf_next;
                /*
                 * Six cases:
                 *        0) no overlap
                 *        1) overlap == lock
                 *        2) overlap contains lock
                 *        3) lock contains overlap
                 *        4) overlap starts before lock
                 *        5) overlap ends after lock
                 */
                switch (ovcase) {
                case 0: /* no overlap */
                        if (needtolink) {
                                *prev = lock;
                                lock->lf_next = overlap;
                        }
                        break;

                case 1: /* overlap == lock */
                        /*
                         * If downgrading lock, others may be
                         * able to acquire it.
                         */
                        if (lock->lf_type == F_RDLCK &&
                            overlap->lf_type == F_WRLCK)
                                lf_wakelock(overlap);
                        overlap->lf_type = lock->lf_type;
                        FREE(lock, M_LOCKF);
                        lock = overlap; /* for debug output below */
                        break;

                case 2: /* overlap contains lock */
                        /*
                         * Check for common starting point and different types.
                         */
                        if (overlap->lf_type == lock->lf_type) {
                                free(lock, M_LOCKF);
                                lock = overlap; /* for debug output below */
                                break;
                        }
                        if (overlap->lf_start == lock->lf_start) {
                                *prev = lock;
                                lock->lf_next = overlap;
                                overlap->lf_start = lock->lf_end + 1;
                        } else
                                lf_split(overlap, lock);
                        lf_wakelock(overlap);
                        break;

                case 3: /* lock contains overlap */
                        /*
                         * If downgrading lock, others may be able to
                         * acquire it, otherwise take the list.
                         */
                        if (lock->lf_type == F_RDLCK &&
                            overlap->lf_type == F_WRLCK) {
                                lf_wakelock(overlap);
                        } else {
                                ltmp = lock->lf_block;
                                lock->lf_block = overlap->lf_block;
                                lf_addblock(lock, ltmp);
                        }
                        /*
                         * Add the new lock if necessary and delete the overlap.
                         */
                        if (needtolink) {
                                *prev = lock;
                                lock->lf_next = overlap->lf_next;
                                prev = &lock->lf_next;
                                needtolink = 0;
                        } else
                                *prev = overlap->lf_next;
                        free(overlap, M_LOCKF);
                        continue;

                case 4: /* overlap starts before lock */
                        /*
                         * Add lock after overlap on the list.
                         */
                        lock->lf_next = overlap->lf_next;
                        overlap->lf_next = lock;
                        overlap->lf_end = lock->lf_start - 1;
                        prev = &lock->lf_next;
                        lf_wakelock(overlap);
                        needtolink = 0;
                        continue;

                case 5: /* overlap ends after lock */
                        /*
                         * Add the new lock before overlap.
                         */
                        if (needtolink) {
                                *prev = lock;
                                lock->lf_next = overlap;
                        }
                        overlap->lf_start = lock->lf_end + 1;
                        lf_wakelock(overlap);
                        break;
                }
                break;
        }
#ifdef LOCKF_DEBUG
        if (lockf_debug & 1) {
                lf_print("lf_setlock: got the lock", lock);
                lf_printlist("lf_setlock", lock);
        }
#endif /* LOCKF_DEBUG */
        return (0);
}

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wakeup anyone we can.
 */
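/*
 * Illustrative example: unlocking [40, 59] out of a held lock covering
 * [0, 99] splits it into [0, 39] and [60, 99] (the "overlap contains
 * lock" case below).
 */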
lf_clearlock(unlock)
        register struct lockf *unlock;
{
        struct lockf **head = unlock->lf_head;
        register struct lockf *lf = *head;
        struct lockf *overlap, **prev;
        int ovcase;

        if (lf == NOLOCKF)
                return (0);
#ifdef LOCKF_DEBUG
        if (unlock->lf_type != F_UNLCK)
                panic("lf_clearlock: bad type");
        if (lockf_debug & 1)
                lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
        prev = head;
        while (ovcase = lf_findoverlap(lf, unlock, SELF, &prev, &overlap)) {
                /*
                 * Wakeup the list of locks to be retried.
                 */
                lf_wakelock(overlap);

                switch (ovcase) {

                case 1: /* overlap == lock */
                        *prev = overlap->lf_next;
                        FREE(overlap, M_LOCKF);
                        break;

                case 2: /* overlap contains lock: split it */
                        if (overlap->lf_start == unlock->lf_start) {
                                overlap->lf_start = unlock->lf_end + 1;
                                break;
                        }
                        lf_split(overlap, unlock);
                        overlap->lf_next = unlock->lf_next;
                        break;

                case 3: /* lock contains overlap */
                        *prev = overlap->lf_next;
                        lf = overlap->lf_next;
                        free(overlap, M_LOCKF);
                        continue;

                case 4: /* overlap starts before lock */
                        overlap->lf_end = unlock->lf_start - 1;
                        prev = &overlap->lf_next;
                        lf = overlap->lf_next;
                        continue;

                case 5: /* overlap ends after lock */
                        overlap->lf_start = unlock->lf_end + 1;
                        break;
                }
                break;
        }
#ifdef LOCKF_DEBUG
        if (lockf_debug & 1)
                lf_printlist("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
        return (0);
}

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
lf_getlock(lock, fl)
        register struct lockf *lock;
        register struct flock *fl;
{
        register struct lockf *block;
        off_t start, end;

#ifdef LOCKF_DEBUG
        if (lockf_debug & 1)
                lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

        if (block = lf_getblock(lock)) {
                fl->l_type = block->lf_type;
                fl->l_whence = SEEK_SET;
                fl->l_start = block->lf_start;
                if (block->lf_end == -1)
                        fl->l_len = 0;
                else
                        fl->l_len = block->lf_end - block->lf_start + 1;
                if (block->lf_flags & F_POSIX)
                        fl->l_pid = ((struct proc *)(block->lf_id))->p_pid;
                else
                        fl->l_pid = -1;
        } else {
                fl->l_type = F_UNLCK;
        }
        return (0);
}

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
struct lockf *
lf_getblock(lock)
        register struct lockf *lock;
{
        struct lockf **prev, *overlap, *lf = *(lock->lf_head);
        int ovcase;

        prev = lock->lf_head;
        while (ovcase = lf_findoverlap(lf, lock, OTHERS, &prev, &overlap)) {
                /*
                 * We've found an overlap, see if it blocks us
                 */
                if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
                        return (overlap);
                /*
                 * Nope, point to the next one on the list and
                 * see if it blocks us
                 */
                lf = overlap->lf_next;
        }
        return (NOLOCKF);
}

/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 *       may be more than one.
 */
lf_findoverlap(lf, lock, type, prev, overlap)
        register struct lockf *lf;
        struct lockf *lock;
        int type;
        struct lockf ***prev;
        struct lockf **overlap;
{
        off_t start, end;

        *overlap = lf;
        if (lf == NOLOCKF)
                return (0);
#ifdef LOCKF_DEBUG
        if (lockf_debug & 2)
                lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
        start = lock->lf_start;
        end = lock->lf_end;
        while (lf != NOLOCKF) {
                if (((type & SELF) && lf->lf_id != lock->lf_id) ||
                    ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
                        *prev = &lf->lf_next;
                        *overlap = lf = lf->lf_next;
                        continue;
                }
#ifdef LOCKF_DEBUG
                if (lockf_debug & 2)
                        lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
                /*
                 * OK, check for overlap
                 *
                 * Six cases:
                 *        0) no overlap
                 *        1) overlap == lock
                 *        2) overlap contains lock
                 *        3) lock contains overlap
                 *        4) overlap starts before lock
                 *        5) overlap ends after lock
                 */
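                /*
                 * An illustrative sketch of the cases ("lock" is the
                 * request, "lf" the existing lock being tested):
                 *
                 *        0)  lock: ___         lf:        ___  (disjoint)
                 *        1)  lock: _____       lf: _____
                 *        2)  lock:  ___        lf: _____
                 *        3)  lock: _____       lf:  ___
                 *        4)  lock:   ____      lf: ____
                 *        5)  lock: ____        lf:   ____
                 */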
                if ((lf->lf_end != -1 && start > lf->lf_end) ||
                    (end != -1 && lf->lf_start > end)) {
                        /* Case 0 */
#ifdef LOCKF_DEBUG
                        if (lockf_debug & 2)
                                printf("no overlap\n");
#endif /* LOCKF_DEBUG */
                        if ((type & SELF) && end != -1 && lf->lf_start > end)
                                return (0);
                        *prev = &lf->lf_next;
                        *overlap = lf = lf->lf_next;
                        continue;
                }
                if ((lf->lf_start == start) && (lf->lf_end == end)) {
                        /* Case 1 */
#ifdef LOCKF_DEBUG
                        if (lockf_debug & 2)
                                printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
                        return (1);
                }
                if ((lf->lf_start <= start) &&
                    (end != -1) &&
                    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
                        /* Case 2 */
#ifdef LOCKF_DEBUG
                        if (lockf_debug & 2)
                                printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
                        return (2);
                }
                if (start <= lf->lf_start &&
                    (end == -1 ||
                     (lf->lf_end != -1 && end >= lf->lf_end))) {
                        /* Case 3 */
#ifdef LOCKF_DEBUG
                        if (lockf_debug & 2)
                                printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
                        return (3);
                }
                if ((lf->lf_start < start) &&
                    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
                        /* Case 4 */
#ifdef LOCKF_DEBUG
                        if (lockf_debug & 2)
                                printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
                        return (4);
                }
                if ((lf->lf_start > start) &&
                    (end != -1) &&
                    ((lf->lf_end > end) || (lf->lf_end == -1))) {
                        /* Case 5 */
#ifdef LOCKF_DEBUG
                        if (lockf_debug & 2)
                                printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
                        return (5);
                }
                panic("lf_findoverlap: default");
        }
        return (0);
}

/*
 * Add a lock to the end of the blocked list.
 */
lf_addblock(lock, blocked)
        struct lockf *lock;
        struct lockf *blocked;
{
        register struct lockf *lf;

        if (blocked == NOLOCKF)
                return;
#ifdef LOCKF_DEBUG
        if (lockf_debug & 2) {
                lf_print("addblock: adding", blocked);
                lf_print("to blocked list of", lock);
        }
#endif /* LOCKF_DEBUG */
        if ((lf = lock->lf_block) == NOLOCKF) {
                lock->lf_block = blocked;
                return;
        }
        while (lf->lf_block != NOLOCKF)
                lf = lf->lf_block;
        lf->lf_block = blocked;
        return;
}

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
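/*
 * For example, splitting lock1 covering [0, 99] around a contained
 * lock2 covering [40, 59] leaves lock1 as [0, 39], links lock2 in
 * after it, and allocates a new lock for [60, 99].
 */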
lf_split(lock1, lock2)
        register struct lockf *lock1;
        register struct lockf *lock2;
{
        register struct lockf *splitlock;

#ifdef LOCKF_DEBUG
        if (lockf_debug & 2) {
                lf_print("lf_split", lock1);
                lf_print("splitting from", lock2);
        }
#endif /* LOCKF_DEBUG */
        /*
         * Check to see if splitting into only two pieces.
         */
        if (lock1->lf_start == lock2->lf_start) {
                lock1->lf_start = lock2->lf_end + 1;
                lock2->lf_next = lock1;
                return;
        }
        if (lock1->lf_end == lock2->lf_end) {
                lock1->lf_end = lock2->lf_start - 1;
                lock2->lf_next = lock1->lf_next;
                lock1->lf_next = lock2;
                return;
        }
        /*
         * Make a new lock consisting of the last part of
         * the encompassing lock
         */
        MALLOC(splitlock, struct lockf *, sizeof *splitlock, M_LOCKF, M_WAITOK);
        bcopy((caddr_t)lock1, (caddr_t)splitlock, sizeof *splitlock);
        splitlock->lf_start = lock2->lf_end + 1;
        splitlock->lf_block = NOLOCKF;
        lock1->lf_end = lock2->lf_start - 1;
        /*
         * OK, now link it in
         */
        splitlock->lf_next = lock1->lf_next;
        lock2->lf_next = splitlock;
        lock1->lf_next = lock2;
}

/*
 * Wakeup a blocklist
 */
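/*
 * Note: each awakened lock's owner resumes in lf_setlock's tsleep()
 * and rescans the lock list, so a wakeup here is a hint to retry,
 * not a grant.
 */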
lf_wakelock(listhead)
        struct lockf *listhead;
{
        register struct lockf *blocklist, *wakelock;

        blocklist = listhead->lf_block;
        listhead->lf_block = NOLOCKF;
        while (blocklist != NOLOCKF) {
                wakelock = blocklist;
                blocklist = blocklist->lf_block;
                wakelock->lf_block = NOLOCKF;
                wakelock->lf_next = NOLOCKF;
#ifdef LOCKF_DEBUG
                if (lockf_debug & 2)
                        lf_print("lf_wakelock: awakening", wakelock);
#endif /* LOCKF_DEBUG */
                wakeup((caddr_t)wakelock);
        }
}

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
lf_print(tag, lock)
        char *tag;
        register struct lockf *lock;
{

        printf("%s: lock 0x%lx for ", tag, lock);
        if (lock->lf_flags & F_POSIX)
                printf("proc %d", ((struct proc *)(lock->lf_id))->p_pid);
        else
                printf("id 0x%x", lock->lf_id);
        printf(" in ino %d on dev <%d, %d>, %s, start %d, end %d",
                lock->lf_inode->i_number,
                major(lock->lf_inode->i_dev),
                minor(lock->lf_inode->i_dev),
                lock->lf_type == F_RDLCK ? "shared" :
                lock->lf_type == F_WRLCK ? "exclusive" :
                lock->lf_type == F_UNLCK ? "unlock" :
                "unknown", lock->lf_start, lock->lf_end);
        if (lock->lf_block)
                printf(" block 0x%x\n", lock->lf_block);
        else
                printf("\n");
}

lf_printlist(tag, lock)
        char *tag;
        struct lockf *lock;
{
        register struct lockf *lf;

        printf("%s: Lock list for ino %d on dev <%d, %d>:\n",
                tag, lock->lf_inode->i_number,
                major(lock->lf_inode->i_dev),
                minor(lock->lf_inode->i_dev));
        for (lf = lock->lf_inode->i_lockf; lf; lf = lf->lf_next) {
                printf("\tlock 0x%lx for ", lf);
                if (lf->lf_flags & F_POSIX)
                        printf("proc %d", ((struct proc *)(lf->lf_id))->p_pid);
                else
                        printf("id 0x%x", lf->lf_id);
                printf(", %s, start %d, end %d",
                        lf->lf_type == F_RDLCK ? "shared" :
                        lf->lf_type == F_WRLCK ? "exclusive" :
                        lf->lf_type == F_UNLCK ? "unlock" :
                        "unknown", lf->lf_start, lf->lf_end);
                if (lf->lf_block)
                        printf(" block 0x%x\n", lf->lf_block);
                else
                        printf("\n");
        }
}
#endif /* LOCKF_DEBUG */