/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1992 OMRON Corporation.
 * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * %sccs.include.redist.c%
 *
 * from: Utah $Hdr: vm_machdep.c 1.21 91/04/06$
 * from: hp300/hp300/vm_machdep.c 7.14 (Berkeley) 12/27/92
 *
 *	@(#)vm_machdep.c	7.5 (Berkeley) %G%
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/user.h>

#include <machine/cpu.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <luna68k/luna68k/pte.h>

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the kernel stack and pcb, making the child
 * ready to run, and marking it so that it can return differently
 * than the parent.  Returns 1 in the child process, 0 in the parent.
 * We currently double-map the user area so that the stack is at the same
 * address in each process; in the future we will probably relocate
 * the frame pointers on the stack after copying.
 */
cpu_fork(p1, p2)
	register struct proc *p1, *p2;
{
	register struct user *up = p2->p_addr;
	int offset;
	extern caddr_t getsp();
	extern char kstack[];

	p2->p_md.md_regs = p1->p_md.md_regs;
	p2->p_md.md_flags = (p1->p_md.md_flags & ~(MDP_AST|MDP_HPUXTRACE));

	/*
	 * Copy pcb and stack from proc p1 to p2.
	 * We do this as cheaply as possible, copying only the active
	 * part of the stack.  The stack and pcb need to agree;
	 * this is tricky, as the final pcb is constructed by savectx,
	 * but its frame isn't yet on the stack when the stack is copied.
	 * swtch compensates for this when the child eventually runs.
	 * This should be done differently, with a single call
	 * that copies and updates the pcb+stack,
	 * replacing the bcopy and savectx.
	 */
	p2->p_addr->u_pcb = p1->p_addr->u_pcb;
	offset = getsp() - kstack;
	bcopy((caddr_t)kstack + offset, (caddr_t)p2->p_addr + offset,
	    (unsigned) ctob(UPAGES) - offset);

	PMAP_ACTIVATE(&p2->p_vmspace->vm_pmap, &up->u_pcb, 0);

	/*
	 * Arrange for a non-local goto when the new process
	 * is started, to resume here, returning nonzero from setjmp.
	 */
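	/*
	 * Sketch of the return sequence, following the hp300 original
	 * named in the header and assuming the same savectx() interface:
	 * savectx() returns 0 when the context is first saved and
	 * nonzero when the child later resumes at this point.
	 */
	if (savectx(up, 1)) {
		/*
		 * Return 1 in child.
		 */
		return (1);
	}
	return (0);
}
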
/*
 * cpu_exit is called as the last action during exit.
 * We release the address space and machine-dependent resources,
 * including the memory for the user structure and kernel stack.
 * Once finished, we call swtch_exit, which switches to a temporary
 * pcb and stack and never returns.  We block memory allocation
 * until swtch_exit has made things safe again.
 */
cpu_exit(p)
	struct proc *p;
{

	vmspace_free(p->p_vmspace);

	(void) splimp();	/* block memory allocation, per the comment above */
	kmem_free(kernel_map, (vm_offset_t)p->p_addr, ctob(UPAGES));
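	/*
	 * Sketch of the exit tail, following the hp300 original: switch
	 * to the temporary pcb and stack; swtch_exit() never returns.
	 */
	swtch_exit();
	/* NOTREACHED */
}
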
/*
 * Dump the machine specific header information at the start of a core dump.
 */
cpu_coredump(p, vp, cred)
	struct proc *p;
	struct vnode *vp;
	struct ucred *cred;
{

	return (vn_rdwr(UIO_WRITE, vp, (caddr_t) p->p_addr, ctob(UPAGES),
	    (off_t)0, UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *) NULL,
	    p));
}

/*
 * Move pages from one kernel virtual address to another.
 * Both addresses are assumed to reside in the Sysmap,
 * and size must be a multiple of CLSIZE.
 */
pagemove(from, to, size)
	register caddr_t from, to;
	int size;
{
	register struct pte *fpte, *tpte;

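	/*
	 * Sketch of the body, following the hp300 original and assuming
	 * the usual m68k helpers: kvtopte() yields the Sysmap PTE for a
	 * kernel VA and TBIS() flushes a single TLB entry.  Each source
	 * PTE is moved to the destination slot, the source is
	 * invalidated, and both translations are flushed.
	 */
	if (size % CLBYTES)
		panic("pagemove");
	fpte = kvtopte(from);
	tpte = kvtopte(to);
	while (size > 0) {
		*tpte++ = *fpte;
		*(int *)fpte++ = PG_NV;
		TBIS(from);
		TBIS(to);
		from += NBPG;
		to += NBPG;
		size -= NBPG;
	}
}
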
/*
 * Map `size' bytes of physical memory starting at `paddr' into
 * kernel VA space at `vaddr'.  Read/write and cache-inhibit status
 * are specified by `prot'.
 */
physaccess(vaddr, paddr, size, prot)
	caddr_t vaddr, paddr;
	register int size;
	int prot;
{
	register struct pte *pte;
	register u_int page;

	pte = kvtopte(vaddr);	/* starting Sysmap PTE for vaddr */
	page = (u_int)paddr & PG_FRAME;
	for (size = btoc(size); size; size--) {
		*(int *)pte++ = PG_V | prot | page;
		page += NBPG;	/* step to the next physical page frame */
	}
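	/*
	 * Sketch of the missing tail, after the hp300 original: flush
	 * the address translation cache so the new mappings take effect.
	 */
	TBIAS();
}
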
physunaccess(vaddr, size)
	caddr_t vaddr;
	register int size;
{
	register struct pte *pte;

	pte = kvtopte(vaddr);	/* starting Sysmap PTE, as above */
	for (size = btoc(size); size; size--)
		*(int *)pte++ = PG_NV;	/* invalidate each mapping */
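	/*
	 * As in physaccess, the tail is sketched from the hp300
	 * original: flush the ATC after the mappings are torn down.
	 */
	TBIAS();
}
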
/*
 * Set a red zone in the kernel stack after the u. area.
 * We don't support a redzone right now.  It really isn't clear
 * that it is a good idea since, if the kernel stack were to roll
 * into a write protected page, the processor would lock up (since
 * it cannot create an exception frame) and we would get no useful
 * post-mortem info.  Currently, under the DEBUG option, we just
 * check at every clock interrupt to see if the current k-stack has
 * gone too far (i.e. into the "redzone" page) and if so, panic.
 * Look at _lev6intr in locore.s for more details.
 */
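/*
 * Per the comment above the function is a no-op; this empty stub is a
 * sketch, with the setredzone name and parameters assumed from the
 * hp300 original.
 */
/*ARGSUSED*/
setredzone(pte, vaddr)
	struct pte *pte;
	caddr_t vaddr;
{
}
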
/*
 * Convert kernel VA to physical address
 */
kvtop(addr)
	register caddr_t addr;
{
	vm_offset_t va;

	va = pmap_extract(kernel_pmap, (vm_offset_t)addr);
	if (va == 0)
		panic("kvtop: zero page frame");
	return((int)va);
}

extern vm_map_t phys_map;

/*
 * Map an IO request into kernel virtual address space.
 *
 * XXX we allocate KVA space by using kmem_alloc_wait which we know
 * allocates space without backing physical memory.  This implementation
 * is a total crock, the multiple mappings of these physical pages should
 * be reflected in the higher-level VM structures to avoid problems.
 */
vmapbuf(bp)
	register struct buf *bp;
{
	register int npf;
	register caddr_t addr;
	register long flags = bp->b_flags;
	struct proc *p;
	int off;
	vm_offset_t kva;
	register vm_offset_t pa;

	if ((flags & B_PHYS) == 0)
		panic("vmapbuf");
	addr = bp->b_saveaddr = bp->b_un.b_addr;
	off = (int)addr & PGOFSET;
	p = bp->b_proc;
	npf = btoc(round_page(bp->b_bcount + off));
	kva = kmem_alloc_wait(phys_map, ctob(npf));
	bp->b_un.b_addr = (caddr_t) (kva + off);
	while (npf--) {
		pa = pmap_extract(vm_map_pmap(&p->p_vmspace->vm_map),
		    (vm_offset_t)addr);
		if (pa == 0)
			panic("vmapbuf: null page frame");
		pmap_enter(vm_map_pmap(phys_map), kva, trunc_page(pa),
		    VM_PROT_READ|VM_PROT_WRITE, TRUE);
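		/*
		 * Loop advance, sketched from the hp300 original: step to
		 * the next user page and the next kernel VA slot.
		 */
		addr += PAGE_SIZE;
		kva += PAGE_SIZE;
	}
}
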
/*
 * Free the io map PTEs associated with this IO operation.
 */
vunmapbuf(bp)
	register struct buf *bp;
{
	register int npf;
	register caddr_t addr = bp->b_un.b_addr;
	vm_offset_t kva;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");
	npf = btoc(round_page(bp->b_bcount + ((int)addr & PGOFSET)));
	kva = (vm_offset_t)((int)addr & ~PGOFSET);
	kmem_free_wakeup(phys_map, kva, ctob(npf));
	bp->b_un.b_addr = bp->b_saveaddr;
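	/*
	 * Final step, assumed from the hp300 original: clear the saved
	 * address now that b_addr has been restored.
	 */
	bp->b_saveaddr = NULL;
}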