Commit | Line | Data |
---|---|---|
f3e2d1ca KM |
1 | /* |
2 | * Copyright (c) 1982, 1986, 1989 Regents of the University of California. | |
3 | * All rights reserved. | |
4 | * | |
5 | * This code is derived from software contributed to Berkeley by | |
6 | * Scooter Morris at Genentech Inc. | |
7 | * | |
8 | * %sccs.include.redist.c% | |
9 | * | |
7a1ac898 | 10 | * @(#)ufs_lockf.c 7.5 (Berkeley) %G% |
f3e2d1ca KM |
11 | */ |
12 | ||
13 | #include "param.h" | |
14 | #include "systm.h" | |
f3e2d1ca KM |
15 | #include "kernel.h" |
16 | #include "file.h" | |
17 | #include "proc.h" | |
f3e2d1ca | 18 | #include "vnode.h" |
f3e2d1ca KM |
19 | #include "malloc.h" |
20 | #include "fcntl.h" | |
c6f5111d MK |
21 | |
22 | #include "lockf.h" | |
23 | #include "quota.h" | |
24 | #include "inode.h" | |
f3e2d1ca | 25 | |
/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
int maxlockdepth = MAXDEPTH;

#ifdef LOCKF_DEBUG
/*
 * Debug trace mask: bit 0 (1) traces the lf_setlock/lf_clearlock/
 * lf_getlock entry points; bit 1 (2) traces overlap scanning,
 * blocked-list management, splitting, and wakeups.
 */
int lockf_debug = 0;
#endif /* LOCKF_DEBUG */

/* Null lock-list pointer. */
#define NOLOCKF (struct lockf *)0
/* Owner-selection flags for lf_findoverlap(): match only our own
 * locks (SELF) or only locks held by other owners (OTHERS). */
#define SELF	0x1
#define OTHERS	0x2
40 | /* | |
bf0442dc | 41 | * Set a byte-range lock. |
f3e2d1ca | 42 | */ |
bf0442dc | 43 | lf_setlock(lock) |
f3e2d1ca KM |
44 | register struct lockf *lock; |
45 | { | |
bf0442dc KM |
46 | register struct lockf *block; |
47 | struct inode *ip = lock->lf_inode; | |
48 | struct lockf **prev, *overlap, *ltmp; | |
49 | static char lockstr[] = "lockf"; | |
50 | int ovcase, priority, needtolink, error; | |
51 | ||
52 | #ifdef LOCKF_DEBUG | |
53 | if (lockf_debug & 1) | |
54 | lf_print("lf_setlock", lock); | |
55 | #endif /* LOCKF_DEBUG */ | |
f3e2d1ca KM |
56 | |
57 | /* | |
bf0442dc | 58 | * Set the priority |
f3e2d1ca | 59 | */ |
bf0442dc KM |
60 | priority = PLOCK; |
61 | if (lock->lf_type == F_WRLCK) | |
62 | priority += 4; | |
63 | priority |= PCATCH; | |
f3e2d1ca | 64 | /* |
bf0442dc | 65 | * Scan lock list for this file looking for locks that would block us. |
f3e2d1ca | 66 | */ |
bf0442dc KM |
67 | while (block = lf_getblock(lock)) { |
68 | /* | |
69 | * Free the structure and return if nonblocking. | |
70 | */ | |
71 | if ((lock->lf_flags & F_WAIT) == 0) { | |
72 | FREE(lock, M_LOCKF); | |
73 | return (EAGAIN); | |
74 | } | |
75 | /* | |
76 | * We are blocked. Since flock style locks cover | |
77 | * the whole file, there is no chance for deadlock. | |
78 | * For byte-range locks we must check for deadlock. | |
79 | * | |
80 | * Deadlock detection is done by looking through the | |
81 | * wait channels to see if there are any cycles that | |
82 | * involve us. MAXDEPTH is set just to make sure we | |
83 | * do not go off into neverland. | |
84 | */ | |
85 | if ((lock->lf_flags & F_POSIX) && | |
86 | (block->lf_flags & F_POSIX)) { | |
87 | register struct proc *wproc; | |
88 | register struct lockf *waitblock; | |
89 | int i = 0; | |
90 | ||
91 | /* The block is waiting on something */ | |
92 | wproc = (struct proc *)block->lf_id; | |
93 | while (wproc->p_wchan && | |
94 | (wproc->p_wmesg == lockstr) && | |
95 | (i++ < maxlockdepth)) { | |
96 | waitblock = (struct lockf *)wproc->p_wchan; | |
97 | /* Get the owner of the blocking lock */ | |
98 | waitblock = waitblock->lf_next; | |
99 | if ((waitblock->lf_flags & F_POSIX) == 0) | |
100 | break; | |
101 | wproc = (struct proc *)waitblock->lf_id; | |
102 | if (wproc == (struct proc *)lock->lf_id) { | |
103 | free(lock, M_LOCKF); | |
104 | return (EDEADLK); | |
105 | } | |
106 | } | |
107 | } | |
108 | /* | |
109 | * For flock type locks, we must first remove | |
110 | * any shared locks that we hold before we sleep | |
111 | * waiting for an exclusive lock. | |
112 | */ | |
113 | if ((lock->lf_flags & F_FLOCK) && | |
114 | lock->lf_type == F_WRLCK) { | |
115 | lock->lf_type = F_UNLCK; | |
116 | (void) lf_clearlock(lock); | |
117 | lock->lf_type = F_WRLCK; | |
118 | } | |
119 | /* | |
120 | * Add our lock to the blocked list and sleep until we're free. | |
121 | * Remember who blocked us (for deadlock detection). | |
122 | */ | |
123 | lock->lf_next = block; | |
124 | lf_addblock(block, lock); | |
125 | #ifdef LOCKF_DEBUG | |
126 | if (lockf_debug & 1) { | |
127 | lf_print("lf_setlock: blocking on", block); | |
128 | lf_printlist("lf_setlock", block); | |
129 | } | |
130 | #endif /* LOCKF_DEBUG */ | |
131 | if (error = tsleep((caddr_t *)lock, priority, lockstr, 0)) { | |
132 | free(lock, M_LOCKF); | |
133 | return (error); | |
134 | } | |
135 | } | |
f3e2d1ca | 136 | /* |
bf0442dc KM |
137 | * No blocks!! Add the lock. Note that we will |
138 | * downgrade or upgrade any overlapping locks this | |
139 | * process already owns. | |
140 | * | |
f3e2d1ca | 141 | * Skip over locks owned by other processes. |
bf0442dc | 142 | * Handle any locks that overlap and are owned by ourselves. |
f3e2d1ca | 143 | */ |
bf0442dc KM |
144 | prev = &ip->i_lockf; |
145 | block = ip->i_lockf; | |
146 | needtolink = 1; | |
f3e2d1ca | 147 | for (;;) { |
bf0442dc KM |
148 | ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap); |
149 | block = overlap->lf_next; | |
f3e2d1ca | 150 | /* |
bf0442dc KM |
151 | * Six cases: |
152 | * 0) no overlap | |
f3e2d1ca KM |
153 | * 1) overlap == lock |
154 | * 2) overlap contains lock | |
155 | * 3) lock contains overlap | |
156 | * 4) overlap starts before lock | |
157 | * 5) overlap ends after lock | |
158 | */ | |
159 | switch (ovcase) { | |
bf0442dc KM |
160 | case 0: /* no overlap */ |
161 | if (needtolink) { | |
162 | *prev = lock; | |
163 | lock->lf_next = overlap; | |
164 | } | |
165 | break; | |
f3e2d1ca KM |
166 | |
167 | case 1: /* overlap == lock */ | |
f3e2d1ca KM |
168 | /* |
169 | * If downgrading lock, others may be | |
170 | * able to acquire it. | |
171 | */ | |
172 | if (lock->lf_type == F_RDLCK && | |
173 | overlap->lf_type == F_WRLCK) | |
174 | lf_wakelock(overlap); | |
175 | overlap->lf_type = lock->lf_type; | |
bf0442dc KM |
176 | FREE(lock, M_LOCKF); |
177 | lock = overlap; /* for debug output below */ | |
178 | break; | |
f3e2d1ca KM |
179 | |
180 | case 2: /* overlap contains lock */ | |
181 | /* | |
bf0442dc | 182 | * Check for common starting point and different types. |
f3e2d1ca | 183 | */ |
f3e2d1ca KM |
184 | if (overlap->lf_type == lock->lf_type) { |
185 | free(lock, M_LOCKF); | |
bf0442dc KM |
186 | lock = overlap; /* for debug output below */ |
187 | break; | |
f3e2d1ca | 188 | } |
bf0442dc KM |
189 | if (overlap->lf_start == lock->lf_start) { |
190 | *prev = lock; | |
191 | lock->lf_next = overlap; | |
192 | overlap->lf_start = lock->lf_end + 1; | |
193 | } else | |
194 | lf_split(overlap, lock); | |
f3e2d1ca | 195 | lf_wakelock(overlap); |
bf0442dc | 196 | break; |
f3e2d1ca KM |
197 | |
198 | case 3: /* lock contains overlap */ | |
199 | /* | |
200 | * If downgrading lock, others may be able to | |
201 | * acquire it, otherwise take the list. | |
202 | */ | |
203 | if (lock->lf_type == F_RDLCK && | |
204 | overlap->lf_type == F_WRLCK) { | |
205 | lf_wakelock(overlap); | |
206 | } else { | |
207 | ltmp = lock->lf_block; | |
208 | lock->lf_block = overlap->lf_block; | |
209 | lf_addblock(lock, ltmp); | |
210 | } | |
211 | /* | |
bf0442dc | 212 | * Add the new lock if necessary and delete the overlap. |
f3e2d1ca | 213 | */ |
bf0442dc KM |
214 | if (needtolink) { |
215 | *prev = lock; | |
216 | lock->lf_next = overlap->lf_next; | |
217 | prev = &lock->lf_next; | |
218 | needtolink = 0; | |
219 | } else | |
220 | *prev = overlap->lf_next; | |
f3e2d1ca | 221 | free(overlap, M_LOCKF); |
bf0442dc | 222 | continue; |
f3e2d1ca KM |
223 | |
224 | case 4: /* overlap starts before lock */ | |
225 | /* | |
bf0442dc | 226 | * Add lock after overlap on the list. |
f3e2d1ca | 227 | */ |
f3e2d1ca KM |
228 | lock->lf_next = overlap->lf_next; |
229 | overlap->lf_next = lock; | |
230 | overlap->lf_end = lock->lf_start - 1; | |
bf0442dc | 231 | prev = &lock->lf_next; |
f3e2d1ca | 232 | lf_wakelock(overlap); |
bf0442dc KM |
233 | needtolink = 0; |
234 | continue; | |
f3e2d1ca KM |
235 | |
236 | case 5: /* overlap ends after lock */ | |
bf0442dc KM |
237 | /* |
238 | * Add the new lock before overlap. | |
239 | */ | |
240 | if (needtolink) { | |
241 | *prev = lock; | |
242 | lock->lf_next = overlap; | |
243 | } | |
f3e2d1ca KM |
244 | overlap->lf_start = lock->lf_end + 1; |
245 | lf_wakelock(overlap); | |
bf0442dc | 246 | break; |
f3e2d1ca | 247 | } |
bf0442dc KM |
248 | break; |
249 | } | |
250 | out: | |
251 | #ifdef LOCKF_DEBUG | |
252 | if (lockf_debug & 1) { | |
253 | lf_print("lf_setlock: got the lock", lock); | |
254 | lf_printlist("lf_setlock", lock); | |
f3e2d1ca | 255 | } |
bf0442dc KM |
256 | #endif /* LOCKF_DEBUG */ |
257 | return (0); | |
258 | } | |
259 | ||
/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wakeup anyone we can.
 * Always returns 0; unlocking a range never fails.
 */
lf_clearlock(unlock)
	register struct lockf *unlock;
{
	struct inode *ip = unlock->lf_inode;
	register struct lockf *lf = ip->i_lockf;
	struct lockf *overlap, **prev;
	int ovcase;

	/* Nothing locked on this inode: nothing to do. */
	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (unlock->lf_type != F_UNLCK)
		panic("lf_clearlock: bad type");
	if (lockf_debug & 1)
		lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	prev = &ip->i_lockf;
	/* Examine only our own locks (SELF) that overlap the range. */
	while (ovcase = lf_findoverlap(lf, unlock, SELF, &prev, &overlap)) {
		/*
		 * Wakeup the list of locks to be retried.
		 */
		lf_wakelock(overlap);

		switch (ovcase) {

		case 1: /* overlap == lock */
			/* Exact match: unlink and free it. */
			*prev = overlap->lf_next;
			FREE(overlap, M_LOCKF);
			break;

		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == unlock->lf_start) {
				/* Common start: just trim the front. */
				overlap->lf_start = unlock->lf_end + 1;
				break;
			}
			lf_split(overlap, unlock);
			/*
			 * lf_split left the tail piece linked through
			 * unlock->lf_next; graft it back after overlap
			 * (unlock itself is not kept on the list).
			 */
			overlap->lf_next = unlock->lf_next;
			break;

		case 3: /* lock contains overlap */
			/* Delete the overlap and keep scanning. */
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			/* Trim the tail of the overlap, keep scanning. */
			overlap->lf_end = unlock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;

		case 5: /* overlap ends after lock */
			/* Trim the front; no further overlaps possible. */
			overlap->lf_start = unlock->lf_end + 1;
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_printlist("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	return (0);
}
329 | ||
330 | /* | |
331 | * Check whether there is a blocking lock, | |
332 | * and if so return its process identifier. | |
333 | */ | |
334 | lf_getlock(lock, fl) | |
335 | register struct lockf *lock; | |
336 | register struct flock *fl; | |
337 | { | |
338 | register struct lockf *block; | |
339 | off_t start, end; | |
340 | ||
341 | #ifdef LOCKF_DEBUG | |
342 | if (lockf_debug & 1) | |
343 | lf_print("lf_getlock", lock); | |
344 | #endif /* LOCKF_DEBUG */ | |
345 | ||
346 | if (block = lf_getblock(lock)) { | |
347 | fl->l_type = block->lf_type; | |
348 | fl->l_whence = SEEK_SET; | |
349 | fl->l_start = block->lf_start; | |
350 | if (block->lf_end == -1) | |
351 | fl->l_len = 0; | |
352 | else | |
353 | fl->l_len = block->lf_end - block->lf_start + 1; | |
354 | if (block->lf_flags & F_POSIX) | |
355 | fl->l_pid = ((struct proc *)(block->lf_id))->p_pid; | |
356 | else | |
357 | fl->l_pid = -1; | |
00209f6c KM |
358 | } else { |
359 | fl->l_type = F_UNLCK; | |
bf0442dc KM |
360 | } |
361 | return (0); | |
f3e2d1ca KM |
362 | } |
363 | ||
/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 *
 * Only locks held by OTHER owners are considered; two locks
 * conflict when they overlap and at least one is exclusive.
 * Returns NOLOCKF if nothing blocks the caller.
 */
struct lockf *
lf_getblock(lock)
	register struct lockf *lock;
{
	struct lockf **prev, *overlap, *lf = lock->lf_inode->i_lockf;
	int ovcase;

	prev = &lock->lf_inode->i_lockf;
	while (ovcase = lf_findoverlap(lf, lock, OTHERS, &prev, &overlap)) {
		/*
		 * We've found an overlap, see if it blocks us
		 */
		if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
			return (overlap);
		/*
		 * Nope, point to the next one on the list and
		 * see if it blocks us
		 */
		lf = overlap->lf_next;
	}
	return (NOLOCKF);
}
390 | ||
/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * "type" selects whose locks to examine: SELF (same lf_id as "lock")
 * or OTHERS (different lf_id).  On return, *overlap points at the
 * overlapping lock and *prev at the link that points to it, so the
 * caller can unlink or insert in place.  The return value classifies
 * the overlap (cases 0-5 below).  An lf_end of -1 appears to mean
 * "to end of file" -- every range test special-cases it.
 *
 * NOTE: this returns only the FIRST overlapping lock. There
 * may be more than one.
 */
lf_findoverlap(lf, lock, type, prev, overlap)
	register struct lockf *lf;
	struct lockf *lock;
	int type;
	struct lockf ***prev;
	struct lockf **overlap;
{
	off_t start, end;

	*overlap = lf;
	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NOLOCKF) {
		/* Skip locks whose owner doesn't match the "type" filter. */
		if (((type & SELF) && lf->lf_id != lock->lf_id) ||
		    ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			/* Case 0 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("no overlap\n");
#endif /* LOCKF_DEBUG */
			/*
			 * A SELF list is kept sorted by start offset, so
			 * once a lock starts past our end we can stop.
			 */
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return (0);
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			/* Case 1 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
			return (1);
		}
		if ((lf->lf_start <= start) &&
		    (end != -1) &&
		    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
			/* Case 2 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
			return (2);
		}
		if (start <= lf->lf_start &&
		    (end == -1 ||
		     (lf->lf_end != -1 && end >= lf->lf_end))) {
			/* Case 3 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
			return (3);
		}
		if ((lf->lf_start < start) &&
		    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
			/* Case 4 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
			return (4);
		}
		if ((lf->lf_start > start) &&
		    (end != -1) &&
		    ((lf->lf_end > end) || (lf->lf_end == -1))) {
			/* Case 5 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
			return (5);
		}
		/* The cases above are exhaustive; reaching here is a bug. */
		panic("lf_findoverlap: default");
	}
	return (0);
}
502 | ||
/*
 * Add a lock to the end of the blocked list.
 *
 * The blocked list is a singly-linked chain through lf_block;
 * "blocked" (which may itself be a chain) is appended at the tail.
 * A NOLOCKF argument is a no-op.
 */
lf_addblock(lock, blocked)
	struct lockf *lock;
	struct lockf *blocked;
{
	register struct lockf *lf;

	if (blocked == NOLOCKF)
		return;
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("addblock: adding", blocked);
		lf_print("to blocked list of", lock);
	}
#endif /* LOCKF_DEBUG */
	/* Empty list: blocked becomes the whole list. */
	if ((lf = lock->lf_block) == NOLOCKF) {
		lock->lf_block = blocked;
		return;
	}
	/* Otherwise walk to the tail and append. */
	while (lf->lf_block != NOLOCKF)
		lf = lf->lf_block;
	lf->lf_block = blocked;
	return;
}
529 | ||
/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 *
 * lock1 is the encompassing lock on the inode's list; lock2 covers a
 * strictly contained range.  lock1 is shrunk around lock2 and lock2
 * is linked into the list after it.  Only the three-piece case
 * allocates (M_WAITOK, so this may sleep).
 */
lf_split(lock1, lock2)
	register struct lockf *lock1;
	register struct lockf *lock2;
{
	register struct lockf *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
	/*
	 * Check to see if splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		/* Common start: lock2 takes the front, lock1 the tail. */
		lock1->lf_start = lock2->lf_end + 1;
		lock2->lf_next = lock1;
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		/* Common end: lock1 keeps the front, lock2 the tail. */
		lock1->lf_end = lock2->lf_start - 1;
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next = lock2;
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock
	 */
	MALLOC(splitlock, struct lockf *, sizeof *splitlock, M_LOCKF, M_WAITOK);
	bcopy((caddr_t)lock1, (caddr_t)splitlock, sizeof *splitlock);
	splitlock->lf_start = lock2->lf_end + 1;
	/* The copy must not inherit lock1's blocked-waiter list. */
	splitlock->lf_block = NOLOCKF;
	lock1->lf_end = lock2->lf_start - 1;
	/*
	 * OK, now link it in: lock1, lock2, splitlock.
	 */
	splitlock->lf_next = lock1->lf_next;
	lock2->lf_next = splitlock;
	lock1->lf_next = lock2;
}
576 | ||
/*
 * Wakeup a blocklist
 *
 * Detach the chain of waiters hanging off listhead->lf_block and
 * wake each one; each waiter's lf_block/lf_next links are cleared
 * so it restarts its lock scan from scratch.  The wakeup channel is
 * the waiter's own lock address (matching the tsleep in lf_setlock).
 */
lf_wakelock(listhead)
	struct lockf *listhead;
{
	register struct lockf *blocklist, *wakelock;

	blocklist = listhead->lf_block;
	listhead->lf_block = NOLOCKF;
	while (blocklist != NOLOCKF) {
		wakelock = blocklist;
		blocklist = blocklist->lf_block;
		wakelock->lf_block = NOLOCKF;
		wakelock->lf_next = NOLOCKF;
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("lf_wakelock: awakening", wakelock);
#endif /* LOCKF_DEBUG */
		wakeup((caddr_t)wakelock);
	}
}
599 | ||
600 | #ifdef LOCKF_DEBUG | |
/*
 * Print out a lock.
 *
 * Debug-only (compiled under LOCKF_DEBUG): one line describing the
 * lock's owner (pid for POSIX locks, raw id otherwise), its inode
 * and device, type, and byte range.
 */
lf_print(tag, lock)
	char *tag;
	register struct lockf *lock;
{

	printf("%s: lock 0x%lx for ", tag, lock);
	if (lock->lf_flags & F_POSIX)
		printf("proc %d", ((struct proc *)(lock->lf_id))->p_pid);
	else
		printf("id 0x%x", lock->lf_id);
	printf(" in ino %d on dev <%d, %d>, %s, start %d, end %d",
		lock->lf_inode->i_number,
		major(lock->lf_inode->i_dev),
		minor(lock->lf_inode->i_dev),
		lock->lf_type == F_RDLCK ? "shared" :
		lock->lf_type == F_WRLCK ? "exclusive" :
		lock->lf_type == F_UNLCK ? "unlock" :
		"unknown", lock->lf_start, lock->lf_end);
	if (lock->lf_block)
		printf(" block 0x%x\n", lock->lf_block);
	else
		printf("\n");
}
627 | ||
/*
 * Print the entire lock list for the inode that "lock" belongs to.
 * Debug-only (compiled under LOCKF_DEBUG).
 */
lf_printlist(tag, lock)
	char *tag;
	struct lockf *lock;
{
	register struct lockf *lf;

	printf("%s: Lock list for ino %d on dev <%d, %d>:\n",
		tag, lock->lf_inode->i_number,
		major(lock->lf_inode->i_dev),
		minor(lock->lf_inode->i_dev));
	for (lf = lock->lf_inode->i_lockf; lf; lf = lf->lf_next) {
		printf("\tlock 0x%lx for ", lf);
		if (lf->lf_flags & F_POSIX)
			printf("proc %d", ((struct proc *)(lf->lf_id))->p_pid);
		else
			printf("id 0x%x", lf->lf_id);
		printf(", %s, start %d, end %d",
			lf->lf_type == F_RDLCK ? "shared" :
			lf->lf_type == F_WRLCK ? "exclusive" :
			lf->lf_type == F_UNLCK ? "unlock" :
			"unknown", lf->lf_start, lf->lf_end);
		if (lf->lf_block)
			printf(" block 0x%x\n", lf->lf_block);
		else
			printf("\n");
	}
}
655 | #endif /* LOCKF_DEBUG */ |