/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1992 OMRON Corporation.
 * Copyright (c) 1982, 1986, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_machdep.c 1.21 91/04/06$
 * from: hp300/hp300/vm_machdep.c	8.4 (Berkeley) 11/14/93
 *
 *	@(#)vm_machdep.c	8.3 (Berkeley) 12/6/93
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/user.h>

#include <machine/cpu.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <luna68k/luna68k/pte.h>

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the kernel stack and pcb, making the child
 * ready to run, and marking it so that it can return differently
 * than the parent.  Returns 1 in the child process, 0 in the parent.
 * We currently double-map the user area so that the stack is at the same
 * address in each process; in the future we will probably relocate
 * the frame pointers on the stack after copying.
 */
cpu_fork(p1, p2)
	register struct proc *p1, *p2;
{
	register struct user *up = p2->p_addr;
	int offset;
	extern caddr_t getsp();
	extern char kstack[];

	p2->p_md.md_regs = p1->p_md.md_regs;
	p2->p_md.md_flags = (p1->p_md.md_flags & ~(MDP_AST|MDP_HPUXTRACE));

	/*
	 * Copy pcb and stack from proc p1 to p2.
	 * We do this as cheaply as possible, copying only the active
	 * part of the stack.  The stack and pcb need to agree;
	 * this is tricky, as the final pcb is constructed by savectx,
	 * but its frame isn't yet on the stack when the stack is copied.
	 * cpu_switch compensates for this when the child eventually runs.
	 * This should be done differently, with a single call
	 * that copies and updates the pcb+stack,
	 * replacing the bcopy and savectx.
	 */
	p2->p_addr->u_pcb = p1->p_addr->u_pcb;
	offset = getsp() - kstack;
	bcopy((caddr_t)kstack + offset, (caddr_t)p2->p_addr + offset,
	    (unsigned) ctob(UPAGES) - offset);

	PMAP_ACTIVATE(&p2->p_vmspace->vm_pmap, &up->u_pcb, 0);

	/*
	 * Arrange for a non-local goto when the new process
	 * is started, to resume here, returning nonzero from setjmp.
	 */
	if (savectx(up, 1)) {
		/*
		 * Return 1 in child.
		 */
		return (1);
	}
	return (0);
}

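/*
 * Illustrative sketch, not part of the original source: the
 * machine-independent fork path is assumed to use the 0/1 return
 * value along these lines (names follow the 4.4BSD fork1()
 * convention; the exact caller code is not quoted here):
 *
 *	if (cpu_fork(p1, p2))
 *		return (0);	(child: resumes here via savectx frame)
 *	(parent: continues fork1() with cpu_fork() == 0)
 */
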
/*
 * cpu_exit is called as the last action during exit.
 * We release the address space and machine-dependent resources,
 * including the memory for the user structure and kernel stack.
 * Once finished, we call switch_exit, which switches to a temporary
 * pcb and stack and never returns.  We block memory allocation
 * until switch_exit has made things safe again.
 */
cpu_exit(p)
	struct proc *p;
{

	vmspace_free(p->p_vmspace);

	(void) splimp();
	kmem_free(kernel_map, (vm_offset_t)p->p_addr, ctob(UPAGES));
	switch_exit();
	/* NOTREACHED */
}

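/*
 * Illustrative sketch, an assumption about the MI caller rather
 * than quoted code: exit1() is expected to end with
 *
 *	cpu_exit(p);
 *	(never reached)
 *
 * since switch_exit() discards this context and switches away for
 * good.
 */
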
/*
 * Dump the machine specific header information at the start of a core dump.
 */
cpu_coredump(p, vp, cred)
	struct proc *p;
	struct vnode *vp;
	struct ucred *cred;
{

	return (vn_rdwr(UIO_WRITE, vp, (caddr_t) p->p_addr, ctob(UPAGES),
	    (off_t)0, UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *) NULL,
	    p));
}

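/*
 * Illustrative sketch of the assumed machine-independent caller
 * (per the 4.4BSD coredump() convention): the u-area written here
 * forms the core file header, with the data and stack segments
 * appended afterwards by MI code, e.g.
 *
 *	error = cpu_coredump(p, vp, cred);
 *	if (error == 0)
 *		error = (write of data and stack segments);
 */
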
/*
 * Move pages from one kernel virtual address to another.
 * Both addresses are assumed to reside in the Sysmap,
 * and size must be a multiple of CLSIZE.
 */
pagemove(from, to, size)
	register caddr_t from, to;
	int size;
{
	register struct pte *fpte, *tpte;

	if (size % CLBYTES)
		panic("pagemove");
	fpte = kvtopte(from);
	tpte = kvtopte(to);
	while (size > 0) {
		*tpte++ = *fpte;
		*(int *)fpte++ = PG_NV;
		TBIS(from);
		TBIS(to);
		from += NBPG;
		to += NBPG;
		size -= NBPG;
	}
#ifdef LUNA2
	DCIS();
#endif
}

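/*
 * Illustrative sketch, not from this file: the chief client of
 * pagemove() is the buffer cache (allocbuf()), which slides pages
 * of KVA from one buffer to another when resizing, roughly
 *
 *	pagemove(from_bp->b_data + off, to_bp->b_data + to_bp->b_bufsize,
 *	    CLBYTES);
 *
 * The variable names here are hypothetical; what matters is that
 * both addresses lie in the Sysmap and the size is a multiple of
 * CLBYTES, as checked above.
 */
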
/*
 * Map `size' bytes of physical memory starting at `paddr' into
 * kernel VA space at `vaddr'.  Read/write and cache-inhibit status
 * are specified by `prot'.
 */
physaccess(vaddr, paddr, size, prot)
	caddr_t vaddr, paddr;
	register int size, prot;
{
	register struct pte *pte;
	register u_int page;

	pte = kvtopte(vaddr);
	page = (u_int)paddr & PG_FRAME;
	for (size = btoc(size); size; size--) {
		*(int *)pte++ = PG_V | prot | page;
		page += NBPG;
	}
	TBIAS();
}

physunaccess(vaddr, size)
	caddr_t vaddr;
	register int size;
{
	register struct pte *pte;

	pte = kvtopte(vaddr);
	for (size = btoc(size); size; size--)
		*(int *)pte++ = PG_NV;
	TBIAS();
}

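/*
 * Illustrative sketch with hypothetical values: a driver attach
 * routine might map one page of cache-inhibited device registers as
 *
 *	physaccess((caddr_t)reg_va, (caddr_t)reg_pa, NBPG, PG_RW|PG_CI);
 *
 * and undo the mapping with
 *
 *	physunaccess((caddr_t)reg_va, NBPG);
 *
 * reg_va/reg_pa are placeholders, not addresses from this kernel.
 */
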
/*
 * Set a red zone in the kernel stack after the u. area.
 * We don't support a redzone right now.  It really isn't clear
 * that it is a good idea since, if the kernel stack were to roll
 * into a write protected page, the processor would lock up (since
 * it cannot create an exception frame) and we would get no useful
 * post-mortem info.  Currently, under the DEBUG option, we just
 * check at every clock interrupt to see if the current k-stack has
 * gone too far (i.e. into the "redzone" page) and if so, panic.
 * Look at _lev6intr in locore.s for more details.
 */
/*ARGSUSED*/
setredzone(pte, vaddr)
	struct pte *pte;
	caddr_t vaddr;
{
}

/*
 * Convert kernel VA to physical address
 */
kvtop(addr)
	register caddr_t addr;
{
	vm_offset_t va;

	va = pmap_extract(kernel_pmap, (vm_offset_t)addr);
	if (va == 0)
		panic("kvtop: zero page frame");
	return((int)va);
}

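/*
 * Illustrative sketch, hypothetical use: DMA hardware is programmed
 * with physical addresses, so a driver would convert a kernel
 * buffer address before starting a transfer, e.g.
 *
 *	dmaaddr = kvtop(bp->b_data);
 *
 * The mapping must already exist; kvtop() panics on an unmapped
 * address instead of returning an error.
 */
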
extern vm_map_t phys_map;

/*
 * Map an IO request into kernel virtual address space.
 *
 * XXX we allocate KVA space by using kmem_alloc_wait which we know
 * allocates space without backing physical memory.  This implementation
 * is a total crock, the multiple mappings of these physical pages should
 * be reflected in the higher-level VM structures to avoid problems.
 */
vmapbuf(bp)
	register struct buf *bp;
{
	register int npf;
	register caddr_t addr;
	register long flags = bp->b_flags;
	struct proc *p;
	int off;
	vm_offset_t kva;
	register vm_offset_t pa;

	if ((flags & B_PHYS) == 0)
		panic("vmapbuf");
	addr = bp->b_saveaddr = bp->b_data;
	off = (int)addr & PGOFSET;
	p = bp->b_proc;
	npf = btoc(round_page(bp->b_bcount + off));
	kva = kmem_alloc_wait(phys_map, ctob(npf));
	bp->b_data = (caddr_t)(kva + off);
	while (npf--) {
		pa = pmap_extract(vm_map_pmap(&p->p_vmspace->vm_map),
		    (vm_offset_t)addr);
		if (pa == 0)
			panic("vmapbuf: null page frame");
		pmap_enter(vm_map_pmap(phys_map), kva, trunc_page(pa),
		    VM_PROT_READ|VM_PROT_WRITE, TRUE);
		addr += PAGE_SIZE;
		kva += PAGE_SIZE;
	}
}

/*
 * Free the io map PTEs associated with this IO operation.
 */
vunmapbuf(bp)
	register struct buf *bp;
{
	register caddr_t addr;
	register int npf;
	vm_offset_t kva;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");
	addr = bp->b_data;
	npf = btoc(round_page(bp->b_bcount + ((int)addr & PGOFSET)));
	kva = (vm_offset_t)((int)addr & ~PGOFSET);
	kmem_free_wakeup(phys_map, kva, ctob(npf));
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = NULL;
}

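/*
 * Illustrative sketch, an assumption about the MI caller: these two
 * routines bracket the driver strategy call in physio(), roughly
 *
 *	vmapbuf(bp);
 *	(*strategy)(bp);
 *	(sleep until the I/O completes)
 *	vunmapbuf(bp);
 *
 * so the driver sees a kernel-virtual b_data that double-maps the
 * user's pages for the duration of the raw transfer.
 */
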
#ifdef MAPPEDCOPY
u_int mappedcopysize = 4096;

/*
 * Copy `count' bytes in from user space address `fromp' to kernel
 * address `top'.  Each user page is mapped in turn at CADDR1 and
 * copied with copypage/bcopy, which beats word-at-a-time user-space
 * accesses for large counts.
 */
mappedcopyin(fromp, top, count)
	register char *fromp, *top;
	register int count;
{
	register vm_offset_t kva, upa;
	register int off, len;
	int alignable;
	pmap_t upmap;
	extern caddr_t CADDR1;

	kva = (vm_offset_t) CADDR1;
	off = (vm_offset_t)fromp & PAGE_MASK;
	alignable = (off == ((vm_offset_t)top & PAGE_MASK));
	upmap = vm_map_pmap(&curproc->p_vmspace->vm_map);
	while (count > 0) {
		/*
		 * First access of a page, use fubyte to make sure
		 * page is faulted in and read access allowed.
		 */
		if (fubyte(fromp) == -1)
			return (EFAULT);
		/*
		 * Map in the page and bcopy data in from it
		 */
		upa = pmap_extract(upmap, trunc_page(fromp));
		if (upa == 0)
			panic("mappedcopyin");
		len = min(count, PAGE_SIZE-off);
		pmap_enter(kernel_pmap, kva, upa, VM_PROT_READ, TRUE);
		if (len == PAGE_SIZE && alignable && off == 0)
			copypage(kva, top);
		else
			bcopy((caddr_t)(kva+off), top, len);
		fromp += len;
		top += len;
		count -= len;
		off = 0;
	}
	pmap_remove(kernel_pmap, kva, kva+PAGE_SIZE);
	return (0);
}

/*
 * Copy `count' bytes out from kernel address `fromp' to user space
 * address `top', mapping each destination page in turn at CADDR2.
 */
mappedcopyout(fromp, top, count)
	register char *fromp, *top;
	register int count;
{
	register vm_offset_t kva, upa;
	register int off, len;
	int alignable;
	pmap_t upmap;
	extern caddr_t CADDR2;

	kva = (vm_offset_t) CADDR2;
	off = (vm_offset_t)top & PAGE_MASK;
	alignable = (off == ((vm_offset_t)fromp & PAGE_MASK));
	upmap = vm_map_pmap(&curproc->p_vmspace->vm_map);
	while (count > 0) {
		/*
		 * First access of a page, use subyte to make sure
		 * page is faulted in and write access allowed.
		 */
		if (subyte(top, *fromp) == -1)
			return (EFAULT);
		/*
		 * Map in the page and bcopy data out to it
		 */
		upa = pmap_extract(upmap, trunc_page(top));
		if (upa == 0)
			panic("mappedcopyout");
		len = min(count, PAGE_SIZE-off);
		pmap_enter(kernel_pmap, kva, upa,
		    VM_PROT_READ|VM_PROT_WRITE, TRUE);
		if (len == PAGE_SIZE && alignable && off == 0)
			copypage(fromp, kva);
		else
			bcopy(fromp, (caddr_t)(kva+off), len);
		fromp += len;
		top += len;
		count -= len;
		off = 0;
	}
	pmap_remove(kernel_pmap, kva, kva+PAGE_SIZE);
	return (0);
}

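/*
 * Illustrative sketch, an assumption about how MAPPEDCOPY is wired
 * up: the copyin/copyout fast paths are expected to divert large
 * transfers here based on the patchable threshold above, roughly
 *
 *	if ((u_int)count >= mappedcopysize)
 *		return (mappedcopyin(fromp, top, count));
 *
 * Smaller copies stay on the ordinary locore.s path, so
 * mappedcopysize sets the crossover point per machine.
 */
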
#endif /* MAPPEDCOPY */