/* 5af4cd0cd4b2f9b08e04982bad09515d548696e2 */
/*
 * Copyright (c) 1982, 1986, 1989 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	7.7 (Berkeley) 7/2/91
 */
* This variable controls the maximum number of processes that will
* be checked in doing deadlock detection.
int maxlockdepth
= MAXDEPTH
;
#define NOLOCKF (struct lockf *)0
register struct lockf
*lock
;
register struct lockf
*block
;
struct inode
*ip
= lock
->lf_inode
;
struct lockf
**prev
, *overlap
, *ltmp
;
static char lockstr
[] = "lockf";
int ovcase
, priority
, needtolink
, error
;
lf_print("lf_setlock", lock
);
if (lock
->lf_type
== F_WRLCK
)
* Scan lock list for this file looking for locks that would block us.
while (block
= lf_getblock(lock
)) {
* Free the structure and return if nonblocking.
if ((lock
->lf_flags
& F_WAIT
) == 0) {
* We are blocked. Since flock style locks cover
* the whole file, there is no chance for deadlock.
* For byte-range locks we must check for deadlock.
* Deadlock detection is done by looking through the
* wait channels to see if there are any cycles that
* involve us. MAXDEPTH is set just to make sure we
* do not go off into neverland.
if ((lock
->lf_flags
& F_POSIX
) &&
(block
->lf_flags
& F_POSIX
)) {
register struct proc
*wproc
;
register struct lockf
*waitblock
;
/* The block is waiting on something */
wproc
= (struct proc
*)block
->lf_id
;
(wproc
->p_wmesg
== lockstr
) &&
waitblock
= (struct lockf
*)wproc
->p_wchan
;
/* Get the owner of the blocking lock */
waitblock
= waitblock
->lf_next
;
if ((waitblock
->lf_flags
& F_POSIX
) == 0)
wproc
= (struct proc
*)waitblock
->lf_id
;
if (wproc
== (struct proc
*)lock
->lf_id
) {
* For flock type locks, we must first remove
* any shared locks that we hold before we sleep
* waiting for an exclusive lock.
if ((lock
->lf_flags
& F_FLOCK
) &&
lock
->lf_type
== F_WRLCK
) {
(void) lf_clearlock(lock
);
* Add our lock to the blocked list and sleep until we're free.
* Remember who blocked us (for deadlock detection).
lf_addblock(block
, lock
);
lf_print("lf_setlock: blocking on", block
);
lf_printlist("lf_setlock", block
);
if (error
= tsleep((caddr_t
)lock
, priority
, lockstr
, 0)) {
* No blocks!! Add the lock. Note that we will
* downgrade or upgrade any overlapping locks this
* Skip over locks owned by other processes.
* Handle any locks that overlap and are owned by ourselves.
if (ovcase
= lf_findoverlap(block
, lock
, SELF
, &prev
, &overlap
))
block
= overlap
->lf_next
;
* 2) overlap contains lock
* 3) lock contains overlap
* 4) overlap starts before lock
* 5) overlap ends after lock
case 1: /* overlap == lock */
* If downgrading lock, others may be
if (lock
->lf_type
== F_RDLCK
&&
overlap
->lf_type
== F_WRLCK
)
overlap
->lf_type
= lock
->lf_type
;
lock
= overlap
; /* for debug output below */
case 2: /* overlap contains lock */
* Check for common starting point and different types.
if (overlap
->lf_type
== lock
->lf_type
) {
lock
= overlap
; /* for debug output below */
if (overlap
->lf_start
== lock
->lf_start
) {
overlap
->lf_start
= lock
->lf_end
+ 1;
case 3: /* lock contains overlap */
* If downgrading lock, others may be able to
* acquire it, otherwise take the list.
if (lock
->lf_type
== F_RDLCK
&&
overlap
->lf_type
== F_WRLCK
) {
lock
->lf_block
= overlap
->lf_block
;
* Add the new lock if necessary and delete the overlap.
lock
->lf_next
= overlap
->lf_next
;
*prev
= overlap
->lf_next
;
case 4: /* overlap starts before lock */
* Add lock after overlap on the list.
lock
->lf_next
= overlap
->lf_next
;
overlap
->lf_end
= lock
->lf_start
- 1;
case 5: /* overlap ends after lock */
* Add the new lock before overlap.
overlap
->lf_start
= lock
->lf_end
+ 1;
lf_print("lf_setlock: got the lock", lock
);
lf_printlist("lf_setlock", lock
);
* Remove a byte-range lock on an inode.
* Generally, find the lock (or an overlap to that lock)
* and remove it (or shrink it), then wakeup anyone we can.
register struct lockf
*unlock
;
struct inode
*ip
= unlock
->lf_inode
;
register struct lockf
*lf
= ip
->i_lockf
;
struct lockf
*overlap
, **prev
;
if (unlock
->lf_type
!= F_UNLCK
)
panic("lf_clearlock: bad type");
lf_print("lf_clearlock", unlock
);
while (ovcase
= lf_findoverlap(lf
, unlock
, SELF
, &prev
, &overlap
)) {
* Wakeup the list of locks to be retried.
case 1: /* overlap == lock */
*prev
= overlap
->lf_next
;
case 2: /* overlap contains lock: split it */
if (overlap
->lf_start
== unlock
->lf_start
) {
overlap
->lf_start
= unlock
->lf_end
+ 1;
lf_split(overlap
, unlock
);
overlap
->lf_next
= unlock
->lf_next
;
case 3: /* lock contains overlap */
*prev
= overlap
->lf_next
;
case 4: /* overlap starts before lock */
overlap
->lf_end
= unlock
->lf_start
- 1;
prev
= &overlap
->lf_next
;
case 5: /* overlap ends after lock */
overlap
->lf_start
= unlock
->lf_end
+ 1;
lf_printlist("lf_clearlock", unlock
);
* Check whether there is a blocking lock,
* and if so return its process identifier.
register struct lockf
*lock
;
register struct flock
*fl
;
register struct lockf
*block
;
lf_print("lf_getlock", lock
);
if (block
= lf_getblock(lock
)) {
fl
->l_type
= block
->lf_type
;
fl
->l_start
= block
->lf_start
;
fl
->l_len
= block
->lf_end
- block
->lf_start
+ 1;
if (block
->lf_flags
& F_POSIX
)
fl
->l_pid
= ((struct proc
*)(block
->lf_id
))->p_pid
;
* Walk the list of locks for an inode and
* return the first blocking lock.
register struct lockf
*lock
;
struct lockf
**prev
, *overlap
, *lf
= lock
->lf_inode
->i_lockf
;
prev
= &lock
->lf_inode
->i_lockf
;
while (ovcase
= lf_findoverlap(lf
, lock
, OTHERS
, &prev
, &overlap
)) {
* We've found an overlap, see if it blocks us
if ((lock
->lf_type
== F_WRLCK
|| overlap
->lf_type
== F_WRLCK
))
* Nope, point to the next one on the list and
* Walk the list of locks for an inode to
* find an overlapping lock (if any).
* NOTE: this returns only the FIRST overlapping lock. There
lf_findoverlap(lf
, lock
, type
, prev
, overlap
)
register struct lockf
*lf
;
lf_print("lf_findoverlap: looking for overlap in", lock
);
if (((type
& SELF
) && lf
->lf_id
!= lock
->lf_id
) ||
((type
& OTHERS
) && lf
->lf_id
== lock
->lf_id
)) {
*overlap
= lf
= lf
->lf_next
;
lf_print("\tchecking", lf
);
* 2) overlap contains lock
* 3) lock contains overlap
* 4) overlap starts before lock
* 5) overlap ends after lock
if ((lf
->lf_end
!= -1 && start
> lf
->lf_end
) ||
(end
!= -1 && lf
->lf_start
> end
)) {
if ((type
& SELF
) && end
!= -1 && lf
->lf_start
> end
)
*overlap
= lf
= lf
->lf_next
;
if ((lf
->lf_start
== start
) && (lf
->lf_end
== end
)) {
printf("overlap == lock\n");
if ((lf
->lf_start
<= start
) &&
((lf
->lf_end
>= end
) || (lf
->lf_end
== -1))) {
printf("overlap contains lock\n");
if (start
<= lf
->lf_start
&&
(lf
->lf_end
!= -1 && end
>= lf
->lf_end
))) {
printf("lock contains overlap\n");
if ((lf
->lf_start
< start
) &&
((lf
->lf_end
>= start
) || (lf
->lf_end
== -1))) {
printf("overlap starts before lock\n");
if ((lf
->lf_start
> start
) &&
((lf
->lf_end
> end
) || (lf
->lf_end
== -1))) {
printf("overlap ends after lock\n");
panic("lf_findoverlap: default");
* Add a lock to the end of the blocked list.
lf_addblock(lock
, blocked
)
register struct lockf
*lf
;
lf_print("addblock: adding", blocked
);
lf_print("to blocked list of", lock
);
if ((lf
= lock
->lf_block
) == NOLOCKF
) {
lock
->lf_block
= blocked
;
while (lf
->lf_block
!= NOLOCKF
)
* Split a lock and a contained region into
* two or three locks as necessary.
register struct lockf
*lock1
;
register struct lockf
*lock2
;
register struct lockf
*splitlock
;
lf_print("lf_split", lock1
);
lf_print("splitting from", lock2
);
* Check to see if spliting into only two pieces.
if (lock1
->lf_start
== lock2
->lf_start
) {
lock1
->lf_start
= lock2
->lf_end
+ 1;
if (lock1
->lf_end
== lock2
->lf_end
) {
lock1
->lf_end
= lock2
->lf_start
- 1;
lock2
->lf_next
= lock1
->lf_next
;
* Make a new lock consisting of the last part of
MALLOC(splitlock
, struct lockf
*, sizeof *splitlock
, M_LOCKF
, M_WAITOK
);
bcopy((caddr_t
)lock1
, (caddr_t
)splitlock
, sizeof *splitlock
);
splitlock
->lf_start
= lock2
->lf_end
+ 1;
splitlock
->lf_block
= NOLOCKF
;
lock1
->lf_end
= lock2
->lf_start
- 1;
splitlock
->lf_next
= lock1
->lf_next
;
lock2
->lf_next
= splitlock
;
register struct lockf
*blocklist
, *wakelock
;
blocklist
= listhead
->lf_block
;
listhead
->lf_block
= NOLOCKF
;
while (blocklist
!= NOLOCKF
) {
blocklist
= blocklist
->lf_block
;
wakelock
->lf_block
= NOLOCKF
;
wakelock
->lf_next
= NOLOCKF
;
lf_print("lf_wakelock: awakening", wakelock
);
wakeup((caddr_t
)wakelock
);
register struct lockf
*lock
;
printf("%s: lock 0x%lx for ", tag
, lock
);
if (lock
->lf_flags
& F_POSIX
)
printf("proc %d", ((struct proc
*)(lock
->lf_id
))->p_pid
);
printf("id 0x%x", lock
->lf_id
);
printf(" in ino %d on dev <%d, %d>, %s, start %d, end %d",
lock
->lf_inode
->i_number
,
major(lock
->lf_inode
->i_dev
),
minor(lock
->lf_inode
->i_dev
),
lock
->lf_type
== F_RDLCK
? "shared" :
lock
->lf_type
== F_WRLCK
? "exclusive" :
lock
->lf_type
== F_UNLCK
? "unlock" :
"unknown", lock
->lf_start
, lock
->lf_end
);
printf(" block 0x%x\n", lock
->lf_block
);
register struct lockf
*lf
;
printf("%s: Lock list for ino %d on dev <%d, %d>:\n",
tag
, lock
->lf_inode
->i_number
,
major(lock
->lf_inode
->i_dev
),
minor(lock
->lf_inode
->i_dev
));
for (lf
= lock
->lf_inode
->i_lockf
; lf
; lf
= lf
->lf_next
) {
printf("\tlock 0x%lx for ", lf
);
if (lf
->lf_flags
& F_POSIX
)
printf("proc %d", ((struct proc
*)(lf
->lf_id
))->p_pid
);
printf("id 0x%x", lf
->lf_id
);
printf(", %s, start %d, end %d",
lf
->lf_type
== F_RDLCK
? "shared" :
lf
->lf_type
== F_WRLCK
? "exclusive" :
lf
->lf_type
== F_UNLCK
? "unlock" :
"unknown", lf
->lf_start
, lf
->lf_end
);
printf(" block 0x%x\n", lf
->lf_block
);