Initial import, 0.1 + pk 0.2.4-B1
[unix-history] / sys / ufs / ufs_lockf.c
CommitLineData
15637ed4
RG
1/*
2 * Copyright (c) 1982, 1986, 1989 Regents of the University of California.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * Scooter Morris at Genentech Inc.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the University of
19 * California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * @(#)ufs_lockf.c 7.7 (Berkeley) 7/2/91
37 *
38 * PATCHES MAGIC LEVEL PATCH THAT GOT US HERE
39 * -------------------- ----- ----------------------
40 * CURRENT PATCH LEVEL: 1 00169
41 * -------------------- ----- ----------------------
42 *
43 * 04 Jun 93 Paul Kranenburg Fix dangling pointer in lockf struct
44 *
45 */
46
47#include "param.h"
48#include "systm.h"
49#include "kernel.h"
50#include "file.h"
51#include "proc.h"
52#include "vnode.h"
53#include "malloc.h"
54#include "fcntl.h"
55
56#include "lockf.h"
57#include "quota.h"
58#include "inode.h"
59
/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
int maxlockdepth = MAXDEPTH;

#ifdef LOCKF_DEBUG
int lockf_debug = 0;	/* debug bitmask: 1 = lock operations, 2 = overlap/list scans */
#endif /* LOCKF_DEBUG */

#define NOLOCKF (struct lockf *)0	/* null lock-list pointer */
#define SELF	0x1			/* lf_findoverlap: consider locks we own */
#define OTHERS	0x2			/* lf_findoverlap: consider locks owned by others */
74/*
75 * Set a byte-range lock.
76 */
/*
 * Set a byte-range lock described by `lock' on its inode.
 *
 * If a conflicting lock exists and F_WAIT is not set, returns EAGAIN.
 * Otherwise the caller sleeps until the region is free, after running
 * POSIX deadlock detection (EDEADLK on a cycle).  On success the lock
 * is merged into the inode's sorted lock list, upgrading, downgrading,
 * splitting or absorbing overlapping locks this process already owns.
 * `lock' is consumed: it is either linked into the list or freed.
 * Returns 0, EAGAIN, EDEADLK, or the error from an interrupted sleep.
 */
lf_setlock(lock)
	register struct lockf *lock;
{
	register struct lockf *block;
	struct inode *ip = lock->lf_inode;
	struct lockf **prev, *overlap, *ltmp;
	static char lockstr[] = "lockf";
	int ovcase, priority, needtolink, error;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	/*
	 * Set the priority.  Writers sleep at a slightly lower priority;
	 * PCATCH lets a signal interrupt the sleep.
	 */
	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	priority |= PCATCH;
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while (block = lf_getblock(lock)) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0) {
			FREE(lock, M_LOCKF);
			return (EAGAIN);
		}
		/*
		 * We are blocked. Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us. MAXDEPTH is set just to make sure we
		 * do not go off into neverland.
		 */
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			register struct proc *wproc;
			register struct lockf *waitblock;
			int i = 0;

			/* The block is waiting on something */
			wproc = (struct proc *)block->lf_id;
			/*
			 * Follow the chain: each sleeping process' wait
			 * channel is the lockf it is blocked on (see the
			 * tsleep below); lf_next of that lockf is the
			 * lock that blocks it.  A cycle back to us means
			 * deadlock.
			 */
			while (wproc->p_wchan &&
			       (wproc->p_wmesg == lockstr) &&
			       (i++ < maxlockdepth)) {
				waitblock = (struct lockf *)wproc->p_wchan;
				/* Get the owner of the blocking lock */
				waitblock = waitblock->lf_next;
				if ((waitblock->lf_flags & F_POSIX) == 0)
					break;
				wproc = (struct proc *)waitblock->lf_id;
				if (wproc == (struct proc *)lock->lf_id) {
					free(lock, M_LOCKF);
					return (EDEADLK);
				}
			}
		}
		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void) lf_clearlock(lock);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_next = block;
		lf_addblock(block, lock);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			lf_print("lf_setlock: blocking on", block);
			lf_printlist("lf_setlock", block);
		}
#endif /* LOCKF_DEBUG */
		/* Nonzero error means the sleep was interrupted (PCATCH). */
		if (error = tsleep((caddr_t)lock, priority, lockstr, 0)) {

			/* Don't leave a dangling pointer in block list */
			if (lf_getblock(lock) == block) {
				struct lockf **prev;

				/* Still there, find us on list */
				prev = &block->lf_block;
				while ((block = block->lf_block) != NOLOCKF) {
					if (block == lock) {
						*prev = block->lf_block;
						break;
					}
					prev = &block->lf_block;
				}
			}
			free(lock, M_LOCKF);
			return (error);
		}
	}
	/*
	 * No blocks!! Add the lock. Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
	prev = &ip->i_lockf;
	block = ip->i_lockf;
	/* needtolink: nonzero until `lock' has been linked into the list */
	needtolink = 1;
	for (;;) {
		if (ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap))
			block = overlap->lf_next;
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			break;

		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap);
			overlap->lf_type = lock->lf_type;
			FREE(lock, M_LOCKF);
			lock = overlap; /* for debug output below */
			break;

		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				free(lock, M_LOCKF);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				*prev = lock;
				lock->lf_next = overlap;
				overlap->lf_start = lock->lf_end + 1;
			} else
				lf_split(overlap, lock);
			lf_wakelock(overlap);
			break;

		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap);
			} else {
				ltmp = lock->lf_block;
				lock->lf_block = overlap->lf_block;
				lf_addblock(lock, ltmp);
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap->lf_next;
				prev = &lock->lf_next;
				needtolink = 0;
			} else
				*prev = overlap->lf_next;
			free(overlap, M_LOCKF);
			/* continue: lock may swallow more overlaps downstream */
			continue;

		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			lock->lf_next = overlap->lf_next;
			overlap->lf_next = lock;
			overlap->lf_end = lock->lf_start - 1;
			prev = &lock->lf_next;
			lf_wakelock(overlap);
			needtolink = 0;
			continue;

		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap);
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1) {
		lf_print("lf_setlock: got the lock", lock);
		lf_printlist("lf_setlock", lock);
	}
#endif /* LOCKF_DEBUG */
	return (0);
}
307
308/*
309 * Remove a byte-range lock on an inode.
310 *
311 * Generally, find the lock (or an overlap to that lock)
312 * and remove it (or shrink it), then wakeup anyone we can.
313 */
/*
 * Release the byte range described by `unlock' (lf_type must be
 * F_UNLCK) from the locks this process holds on the inode, trimming,
 * splitting or deleting overlapping entries and waking any sleepers
 * on each one.  `unlock' itself is a temporary descriptor owned by
 * the caller and is never linked into the list.  Always returns 0.
 */
lf_clearlock(unlock)
	register struct lockf *unlock;
{
	struct inode *ip = unlock->lf_inode;
	register struct lockf *lf = ip->i_lockf;
	struct lockf *overlap, **prev;
	int ovcase;

	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (unlock->lf_type != F_UNLCK)
		panic("lf_clearlock: bad type");
	if (lockf_debug & 1)
		lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	prev = &ip->i_lockf;
	/* Scan only our own locks; others are unaffected by an unlock. */
	while (ovcase = lf_findoverlap(lf, unlock, SELF, &prev, &overlap)) {
		/*
		 * Wakeup the list of locks to be retried.
		 */
		lf_wakelock(overlap);

		switch (ovcase) {

		case 1: /* overlap == lock */
			*prev = overlap->lf_next;
			FREE(overlap, M_LOCKF);
			break;

		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == unlock->lf_start) {
				overlap->lf_start = unlock->lf_end + 1;
				break;
			}
			lf_split(overlap, unlock);
			/*
			 * lf_split chained the temporary `unlock' record
			 * into the list; bypass it so it is not left there.
			 */
			overlap->lf_next = unlock->lf_next;
			break;

		case 3: /* lock contains overlap */
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			free(overlap, M_LOCKF);
			/* continue: more of our locks may fall in the range */
			continue;

		case 4: /* overlap starts before lock */
			overlap->lf_end = unlock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;

		case 5: /* overlap ends after lock */
			overlap->lf_start = unlock->lf_end + 1;
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_printlist("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	return (0);
}
377
378/*
379 * Check whether there is a blocking lock,
380 * and if so return its process identifier.
381 */
382lf_getlock(lock, fl)
383 register struct lockf *lock;
384 register struct flock *fl;
385{
386 register struct lockf *block;
387 off_t start, end;
388
389#ifdef LOCKF_DEBUG
390 if (lockf_debug & 1)
391 lf_print("lf_getlock", lock);
392#endif /* LOCKF_DEBUG */
393
394 if (block = lf_getblock(lock)) {
395 fl->l_type = block->lf_type;
396 fl->l_whence = SEEK_SET;
397 fl->l_start = block->lf_start;
398 if (block->lf_end == -1)
399 fl->l_len = 0;
400 else
401 fl->l_len = block->lf_end - block->lf_start + 1;
402 if (block->lf_flags & F_POSIX)
403 fl->l_pid = ((struct proc *)(block->lf_id))->p_pid;
404 else
405 fl->l_pid = -1;
406 } else {
407 fl->l_type = F_UNLCK;
408 }
409 return (0);
410}
411
412/*
413 * Walk the list of locks for an inode and
414 * return the first blocking lock.
415 */
416struct lockf *
417lf_getblock(lock)
418 register struct lockf *lock;
419{
420 struct lockf **prev, *overlap, *lf = lock->lf_inode->i_lockf;
421 int ovcase;
422
423 prev = &lock->lf_inode->i_lockf;
424 while (ovcase = lf_findoverlap(lf, lock, OTHERS, &prev, &overlap)) {
425 /*
426 * We've found an overlap, see if it blocks us
427 */
428 if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
429 return (overlap);
430 /*
431 * Nope, point to the next one on the list and
432 * see if it blocks us
433 */
434 lf = overlap->lf_next;
435 }
436 return (NOLOCKF);
437}
438
439/*
440 * Walk the list of locks for an inode to
441 * find an overlapping lock (if any).
442 *
443 * NOTE: this returns only the FIRST overlapping lock. There
444 * may be more than one.
445 */
lf_findoverlap(lf, lock, type, prev, overlap)
	register struct lockf *lf;	/* head of (sub)list to scan */
	struct lockf *lock;		/* the lock being tested */
	int type;			/* SELF and/or OTHERS ownership filter */
	struct lockf ***prev;		/* in/out: link slot preceding *overlap */
	struct lockf **overlap;		/* out: the overlapping lock found */
{
	off_t start, end;

	*overlap = lf;
	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NOLOCKF) {
		/* Skip locks not in the requested ownership class. */
		if (((type & SELF) && lf->lf_id != lock->lf_id) ||
		    ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 *
		 * Note: an lf_end of -1 means the lock extends to
		 * end of file throughout these comparisons.
		 */
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			/* Case 0 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("no overlap\n");
#endif /* LOCKF_DEBUG */
			/*
			 * Past the end of our range; presumably the list
			 * is sorted by start so no later lock of ours can
			 * overlap either — stop early for SELF scans.
			 */
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return (0);
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			/* Case 1 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
			return (1);
		}
		if ((lf->lf_start <= start) &&
		    (end != -1) &&
		    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
			/* Case 2 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
			return (2);
		}
		if (start <= lf->lf_start &&
		    (end == -1 ||
		     (lf->lf_end != -1 && end >= lf->lf_end))) {
			/* Case 3 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
			return (3);
		}
		if ((lf->lf_start < start) &&
		    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
			/* Case 4 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
			return (4);
		}
		if ((lf->lf_start > start) &&
		    (end != -1) &&
		    ((lf->lf_end > end) || (lf->lf_end == -1))) {
			/* Case 5 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
			return (5);
		}
		/* The five cases above are exhaustive for a true overlap. */
		panic("lf_findoverlap: default");
	}
	return (0);
}
550
551/*
552 * Add a lock to the end of the blocked list.
553 */
554lf_addblock(lock, blocked)
555 struct lockf *lock;
556 struct lockf *blocked;
557{
558 register struct lockf *lf;
559
560 if (blocked == NOLOCKF)
561 return;
562#ifdef LOCKF_DEBUG
563 if (lockf_debug & 2) {
564 lf_print("addblock: adding", blocked);
565 lf_print("to blocked list of", lock);
566 }
567#endif /* LOCKF_DEBUG */
568 if ((lf = lock->lf_block) == NOLOCKF) {
569 lock->lf_block = blocked;
570 return;
571 }
572 while (lf->lf_block != NOLOCKF)
573 lf = lf->lf_block;
574 lf->lf_block = blocked;
575 return;
576}
577
578/*
579 * Split a lock and a contained region into
580 * two or three locks as necessary.
581 */
582lf_split(lock1, lock2)
583 register struct lockf *lock1;
584 register struct lockf *lock2;
585{
586 register struct lockf *splitlock;
587
588#ifdef LOCKF_DEBUG
589 if (lockf_debug & 2) {
590 lf_print("lf_split", lock1);
591 lf_print("splitting from", lock2);
592 }
593#endif /* LOCKF_DEBUG */
594 /*
595 * Check to see if spliting into only two pieces.
596 */
597 if (lock1->lf_start == lock2->lf_start) {
598 lock1->lf_start = lock2->lf_end + 1;
599 lock2->lf_next = lock1;
600 return;
601 }
602 if (lock1->lf_end == lock2->lf_end) {
603 lock1->lf_end = lock2->lf_start - 1;
604 lock2->lf_next = lock1->lf_next;
605 lock1->lf_next = lock2;
606 return;
607 }
608 /*
609 * Make a new lock consisting of the last part of
610 * the encompassing lock
611 */
612 MALLOC(splitlock, struct lockf *, sizeof *splitlock, M_LOCKF, M_WAITOK);
613 bcopy((caddr_t)lock1, (caddr_t)splitlock, sizeof *splitlock);
614 splitlock->lf_start = lock2->lf_end + 1;
615 splitlock->lf_block = NOLOCKF;
616 lock1->lf_end = lock2->lf_start - 1;
617 /*
618 * OK, now link it in
619 */
620 splitlock->lf_next = lock1->lf_next;
621 lock2->lf_next = splitlock;
622 lock1->lf_next = lock2;
623}
624
625/*
626 * Wakeup a blocklist
627 */
628lf_wakelock(listhead)
629 struct lockf *listhead;
630{
631 register struct lockf *blocklist, *wakelock;
632
633 blocklist = listhead->lf_block;
634 listhead->lf_block = NOLOCKF;
635 while (blocklist != NOLOCKF) {
636 wakelock = blocklist;
637 blocklist = blocklist->lf_block;
638 wakelock->lf_block = NOLOCKF;
639 wakelock->lf_next = NOLOCKF;
640#ifdef LOCKF_DEBUG
641 if (lockf_debug & 2)
642 lf_print("lf_wakelock: awakening", wakelock);
643#endif /* LOCKF_DEBUG */
644 wakeup((caddr_t)wakelock);
645 }
646}
647
648#ifdef LOCKF_DEBUG
649/*
650 * Print out a lock.
651 */
652lf_print(tag, lock)
653 char *tag;
654 register struct lockf *lock;
655{
656
657 printf("%s: lock 0x%lx for ", tag, lock);
658 if (lock->lf_flags & F_POSIX)
659 printf("proc %d", ((struct proc *)(lock->lf_id))->p_pid);
660 else
661 printf("id 0x%x", lock->lf_id);
662 printf(" in ino %d on dev <%d, %d>, %s, start %d, end %d",
663 lock->lf_inode->i_number,
664 major(lock->lf_inode->i_dev),
665 minor(lock->lf_inode->i_dev),
666 lock->lf_type == F_RDLCK ? "shared" :
667 lock->lf_type == F_WRLCK ? "exclusive" :
668 lock->lf_type == F_UNLCK ? "unlock" :
669 "unknown", lock->lf_start, lock->lf_end);
670 if (lock->lf_block)
671 printf(" block 0x%x\n", lock->lf_block);
672 else
673 printf("\n");
674}
675
676lf_printlist(tag, lock)
677 char *tag;
678 struct lockf *lock;
679{
680 register struct lockf *lf;
681
682 printf("%s: Lock list for ino %d on dev <%d, %d>:\n",
683 tag, lock->lf_inode->i_number,
684 major(lock->lf_inode->i_dev),
685 minor(lock->lf_inode->i_dev));
686 for (lf = lock->lf_inode->i_lockf; lf; lf = lf->lf_next) {
687 printf("\tlock 0x%lx for ", lf);
688 if (lf->lf_flags & F_POSIX)
689 printf("proc %d", ((struct proc *)(lf->lf_id))->p_pid);
690 else
691 printf("id 0x%x", lf->lf_id);
692 printf(", %s, start %d, end %d",
693 lf->lf_type == F_RDLCK ? "shared" :
694 lf->lf_type == F_WRLCK ? "exclusive" :
695 lf->lf_type == F_UNLCK ? "unlock" :
696 "unknown", lf->lf_start, lf->lf_end);
697 if (lf->lf_block)
698 printf(" block 0x%x\n", lf->lf_block);
699 else
700 printf("\n");
701 }
702}
703#endif /* LOCKF_DEBUG */