/*
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 *	$Id: vm_machdep.c,v 1.9 1994/01/14 16:23:42 davidg Exp $
 */

#include "param.h"
#include "systm.h"
#include "proc.h"
#include "buf.h"
#include "user.h"

#include "../include/cpu.h"

#include "vm/vm.h"
#include "vm/vm_kern.h"

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the kernel stack and pcb, making the child
 * ready to run, and marking it so that it can return differently
 * than the parent.  Returns 1 in the child process, 0 in the parent.
 * We currently double-map the user area so that the stack is at the same
 * address in each process; in the future we will probably relocate
 * the frame pointers on the stack after copying.
 */
cpu_fork(p1, p2)
	register struct proc *p1, *p2;
{
	register struct user *up = p2->p_addr;
	int foo, offset, addr, i;
	extern char kstack[];		/* double-mapped base of the u-area/kernel stack */
	extern int mvesp();		/* returns the current stack pointer */

	/*
	 * Copy pcb and stack from proc p1 to p2.
	 * We do this as cheaply as possible, copying only the active
	 * part of the stack.  The stack and pcb need to agree;
	 * this is tricky, as the final pcb is constructed by savectx,
	 * but its frame isn't yet on the stack when the stack is copied.
	 * swtch compensates for this when the child eventually runs.
	 * This should be done differently, with a single call
	 * that copies and updates the pcb+stack,
	 * replacing the bcopy and savectx.
	 */
	p2->p_addr->u_pcb = p1->p_addr->u_pcb;
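	/*
	 * mvesp() reads the current stack pointer.  Since the stack grows
	 * down within the u-area, "offset" is the amount of dead space
	 * below the live frames, and only the range from kstack+offset
	 * to the end of the u-area needs to be copied into the child.
	 */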
	offset = mvesp() - (int)kstack;
	bcopy((caddr_t)kstack + offset, (caddr_t)p2->p_addr + offset,
	    (unsigned) ctob(UPAGES) - offset);

	/*
	 * Wire top of address space of child to its kstack.
	 * First, fault in a page of pte's to map it.
	 */
	addr = trunc_page((u_int)vtopte(kstack));
	vm_map_pageable(&p2->p_vmspace->vm_map, addr, addr+NBPG, FALSE);
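
	/*
	 * Double-map the child's u-area at the shared kstack address:
	 * each of the UPAGES pages already mapped at p2->p_addr in the
	 * kernel map is entered again, at kstack, in the child's pmap.
	 */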
	for (i = 0; i < UPAGES; i++)
		pmap_enter(&p2->p_vmspace->vm_pmap, kstack+i*NBPG,
		    pmap_extract(kernel_pmap, ((int)p2->p_addr)+i*NBPG),
		    /*
		     * The user area has to be mapped writable because
		     * it contains the kernel stack (when CR0_WP is on
		     * on a 486 there is no user-read/kernel-write
		     * mode).  It is protected from user mode access
		     * by the segment limits.
		     */
		    VM_PROT_READ|VM_PROT_WRITE, TRUE);
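
	/*
	 * pmap_activate() records the child's page-directory base in the
	 * new pcb, so the first swtch() to p2 runs on the child's
	 * address space.
	 */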
	pmap_activate(&p2->p_vmspace->vm_pmap, &up->u_pcb);

	/*
	 * Arrange for a non-local goto when the new process
	 * is started, to resume here, returning nonzero from setjmp.
	 */
	if (savectx(up, 1)) {
		/*
		 * Return 1 in child.
		 */
		return (1);
	}
	return (0);
}
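
/*
 * Note: the machine-independent fork path relies on this double return;
 * the child's pass through the nonzero branch above is what ultimately
 * lets fork() return 0 in the child and the child's pid in the parent.
 */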

/*
 * cpu_exit is called as the last action during exit.
 *
 * We change to an inactive address space and a "safe" stack,
 * passing thru an argument to the new stack. Now, safely isolated
 * from the resources we're shedding, we release the address space
 * and any remaining machine-dependent resources, including the
 * memory for the user structure and kernel stack.
 *
 * Next, we assign a dummy context to be written over by swtch,
 * calling it to send this process off to oblivion.
 * [The nullpcb allows us to minimize cost in swtch() by not having
 * a special case].
 */
struct proc *swtch_to_inactive();
volatile void
cpu_exit(p)
	register struct proc *p;
{
	static struct pcb nullpcb;	/* pcb to overwrite on last swtch */

	/* move to inactive space and stack, passing arg across */
	p = swtch_to_inactive(p);

	/* drop per-process resources */
	vmspace_free(p->p_vmspace);
	kmem_free(kernel_map, (vm_offset_t)p->p_addr, ctob(UPAGES));

	p->p_addr = (struct user *) &nullpcb;
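	/*
	 * From here on this process must not touch its old u-area: the
	 * dummy nullpcb above gives swtch() a harmless place to save the
	 * dying context, and swtch() never returns here.
	 */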
	swtch();
	/* NOTREACHED */

	/*
	 * This is to shut up the compiler, and if swtch() failed I suppose
	 * this would be a good thing.  This keeps gcc happy because panic
	 * is a volatile void function as well.
	 */
	panic("cpu_exit");
}

cpu_wait(p) struct proc *p; {
	/* extern vm_map_t upages_map; */

	/* drop per-process resources */
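	/*
	 * pmap_remove() tears down the kernel translations for the
	 * UPAGES pages of the u-area before kmem_free() returns the
	 * kernel VA range to kernel_map.
	 */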
	pmap_remove(vm_map_pmap(kernel_map), (vm_offset_t) p->p_addr,
	    ((vm_offset_t) p->p_addr) + ctob(UPAGES));
	kmem_free(kernel_map, (vm_offset_t)p->p_addr, ctob(UPAGES));
	vmspace_free(p->p_vmspace);
}

/*
 * Set a red zone in the kernel stack after the u. area.
 */
setredzone(pte, vaddr)
	u_short *pte;
	caddr_t vaddr;
{
/* eventually do this by setting up an expand-down stack segment
   for ss0: selector, allowing stack access down to top of u.
   this means though that protection violations need to be handled
   thru a double fault exception that must do an integral task
   switch to a known good context, within which a dump can be
   taken. a sensible scheme might be to save the initial context
   used by sched (that has physical memory mapped 1:1 at bottom)
   and take the dump while still in mapped mode */
}
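
/*
 * A cruder page-granular alternative to the expand-down segment scheme
 * sketched in setredzone() above (not implemented here): clear the write
 * bit in the pte guarding the end of the u. area and flush the TLB, so
 * that a kernel stack overflow faults on the first write into the red
 * zone rather than silently corrupting adjacent memory.
 */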

/*
 * Move pages from one kernel virtual address to another.
 * Both addresses are assumed to reside in the Sysmap,
 * and size must be a multiple of CLSIZE.
 */
pagemove(from, to, size)
	register caddr_t from, to;
	int size;
{
	register struct pte *fpte, *tpte;

	if (size % CLBYTES)
		panic("pagemove");
	fpte = kvtopte(from);
	tpte = kvtopte(to);
	while (size > 0) {
		*tpte++ = *fpte;
		*(int *)fpte++ = 0;
		from += NBPG;
		to += NBPG;
		size -= NBPG;
	}
	tlbflush();
}
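
/*
 * pagemove() is used by, e.g., the buffer cache when it grows or shrinks
 * a buffer: the underlying pages are reassigned to a new kernel VA by
 * moving the ptes rather than copying the data.
 */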

/*
 * Convert kernel VA to physical address
 */
kvtop(addr)
	register caddr_t addr;
{
	vm_offset_t va;

	va = pmap_extract(kernel_pmap, (vm_offset_t)addr);
	if (va == 0)
		panic("kvtop: zero page frame");
	return((int)va);
}

extern vm_map_t phys_map;

/*
 * Map an IO request into kernel virtual address space.  Requests fall into
 * one of five categories:
 *
 *	B_PHYS|B_UAREA:	User u-area swap.
 *			Address is relative to start of u-area (p_addr).
 *	B_PHYS|B_PAGET:	User page table swap.
 *			Address is a kernel VA in usrpt (Usrptmap).
 *	B_PHYS|B_DIRTY:	Dirty page push.
 *			Address is a VA in proc2's address space.
 *	B_PHYS|B_PGIN:	Kernel pagein of user pages.
 *			Address is VA in user's address space.
 *	B_PHYS:		User "raw" IO request.
 *			Address is VA in user's address space.
 *
 * All requests are (re)mapped into kernel VA space via the useriomap
 * (a name with only slightly more meaning than "kernelmap")
 */
vmapbuf(bp)
	register struct buf *bp;
{
	register int npf;
	register caddr_t addr;
	register long flags = bp->b_flags;
	struct proc *p;
	int off;
	vm_offset_t kva;
	register vm_offset_t pa;

	if ((flags & B_PHYS) == 0)
		panic("vmapbuf");
	addr = bp->b_saveaddr = bp->b_un.b_addr;
	off = (int)addr & PGOFSET;
	p = bp->b_proc;
	npf = btoc(round_page(bp->b_bcount + off));
	kva = kmem_alloc_wait(phys_map, ctob(npf));
	bp->b_un.b_addr = (caddr_t) (kva + off);
	while (npf--) {
		pa = pmap_extract(&p->p_vmspace->vm_pmap, (vm_offset_t)addr);
		if (pa == 0)
			panic("vmapbuf: null page frame");
		pmap_enter(vm_map_pmap(phys_map), kva, trunc_page(pa),
		    VM_PROT_READ|VM_PROT_WRITE, TRUE);
		addr += PAGE_SIZE;
		kva += PAGE_SIZE;
	}
}
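
/*
 * The usual caller is physio(): raw-device reads and writes set B_PHYS
 * and go through vmapbuf() so the driver can address the user's buffer
 * through a stable kernel VA while the transfer is in progress.
 */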

/*
 * Free the io map PTEs associated with this IO operation.
 * We also invalidate the TLB entries and restore the original b_addr.
 */
vunmapbuf(bp)
	register struct buf *bp;
{
	register int npf;
	register caddr_t addr = bp->b_un.b_addr;
	vm_offset_t kva;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");
	npf = btoc(round_page(bp->b_bcount + ((int)addr & PGOFSET)));
	kva = (vm_offset_t)((int)addr & ~PGOFSET);
	kmem_free_wakeup(phys_map, kva, ctob(npf));
	bp->b_un.b_addr = bp->b_saveaddr;
	bp->b_saveaddr = NULL;
}
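
/*
 * kmem_free_wakeup() pairs with the kmem_alloc_wait() in vmapbuf():
 * besides freeing the KVA it wakes any process sleeping for space in
 * phys_map, which is a limited-size submap.
 */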

/*
 * Force reset the processor by invalidating the entire address space!
 */
cpu_reset() {

	/* force a shutdown by unmapping entire address space ! */
	bzero((caddr_t) PTD, NBPG);
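
	/*
	 * The page directory is now empty, but the running code is still
	 * reachable through stale TLB entries.  Flushing the TLB makes
	 * the next instruction fetch fault with no valid mappings left,
	 * which escalates to a triple fault and resets the CPU.
	 */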
/* "good night, sweet prince .... <THUNK!>" */