/*
 * Copyright (c) 1990 The Regents of the University of California.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 *	@(#)vm_machdep.c	5.4 (Berkeley) %G%
 */

/*
 * Copyright (c) 1989, 1990 William F. Jolitz
 */

/*
 * Copyright (c) 1988 University of Utah.
 * All rights reserved.  The Utah Software License Agreement
 * specifies the terms and conditions for redistribution.
 *
 * Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 */

/*
 * Copyright (c) 1982, 1986 Regents of the University of California.
 * All rights reserved.  The Berkeley software License Agreement
 * specifies the terms and conditions for redistribution.
 *
 *	@(#)vm_machdep.c	7.1 (Berkeley) 6/5/86
 */
/*
 * Set a red zone in the kernel stack after the u. area.
 */
/* eventually do this by setting up an expand-down stack segment
   for ss0: selector, allowing stack access down to top of u.
   this means though that protection violations need to be handled
   thru a double fault exception that must do an integral task
   switch to a known good context, within which a dump can be
   taken.  a sensible scheme might be to save the initial context
   used by sched (that has physical memory mapped 1:1 at bottom)
   and take the dump while still in mapped mode */
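/*
 * Reconstruction sketch: the routine wrapping the note above was not
 * recovered.  The name and argument list below are assumptions taken
 * from the convention used by other BSD ports; the body stays empty
 * until the expand-down stack scheme described above is implemented.
 */
setredzone(pte, vaddr)
	u_short *pte;
	caddr_t vaddr;
{
	/* placeholder; see the expand-down stack segment note above */
}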
/*
 * Check for valid program size
 * NB - Check data and data growth separately as they may overflow
 */
chksize(ts, ids, uds, ss)
	unsigned ts, ids, uds, ss;
{
	extern unsigned maxtsize;

	if (ctob(ts) > maxtsize ||
	    ctob(ids) > u.u_rlimit[RLIMIT_DATA].rlim_cur ||
	    ctob(uds) > u.u_rlimit[RLIMIT_DATA].rlim_cur ||
	    ctob(ids + uds) > u.u_rlimit[RLIMIT_DATA].rlim_cur ||
	    ctob(ss) > u.u_rlimit[RLIMIT_STACK].rlim_cur) {
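		/*
		 * Assumed completion: the recovered fragment ends at the
		 * size check.  A 4.3BSD-style error return is sketched in
		 * here; the exact original error handling was not recovered.
		 */
		u.u_error = ENOMEM;
		return (1);
	}
	return (0);
}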
/*
 * Change protection codes of text segment.
 * Have to flush translation buffer since this
 * affects virtual memory mapping of current process.
 */
/*
 * XXX reconstruction: the function header, the declarations and setup of
 * `v', `tp' and `c', and the error return in the isatsv() check were not
 * recovered; they are assumptions in the style of the other BSD ports.
 */
chgprot(addr, tprot)
	caddr_t addr;
	long tprot;
{
	unsigned v;
	int tp;
	register struct pte *pte;
	register struct cmap *c;

	v = clbase(btop(addr));
	if (!isatsv(u.u_procp, v)) {
		u.u_error = EFAULT;
		return (0);
	}
	tp = vtotp(u.u_procp, v);
	pte = tptopte(u.u_procp, tp);
	if (pte->pg_fod == 0 && pte->pg_pfnum) {
		c = &cmap[pgtocm(pte->pg_pfnum)];
		if (c->c_blkno && c->c_mdev != MSWAPX)
			munhash(mount[c->c_mdev].m_dev,
			    (daddr_t)(u_long)c->c_blkno);
	}
	*(u_int *)pte &= ~PG_PROT;
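	/*
	 * Assumed completion: applying the new protection bits, flushing
	 * the TLB and the success return were not recovered.
	 */
	*(u_int *)pte |= tprot;
	tlbflush();
	return (1);
}

/*
 * Companion sketch: the declarations that follow clearly belong to the
 * routine that changes the protection on the entire text segment; its
 * name and header below are assumptions.
 */
settprot(tprot)
	long tprot;
{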
	register u_int *ptaddr, i;

	ptaddr = (u_int *)u.u_procp->p_p0br;
	for (i = 0; i < u.u_tsize; i++) {
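		/*
		 * Assumed loop body: clear the old protection bits and set
		 * the new ones in each text PTE (not recovered verbatim).
		 */
		ptaddr[i] &= ~PG_PROT;
		ptaddr[i] |= tprot;
	}
	tlbflush();
}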
/*
 * Simulate effect of VAX region length registers.
 * The one case where we must do anything is if a region has shrunk.
 * In that case we must invalidate all the PTEs for the no longer valid VAs.
 */
/*
 * XXX reconstruction: the function name/header, the region-selection
 * branches and the early return joining the recovered statements are
 * assumptions; only the individual assignments survived.
 */
setptlr(region, nlen)
	int region, nlen;
{
	register struct pte *pte;
	register int change;
	int olen;

	if (region == 0)
		olen = u.u_pcb.pcb_p0lr;		/* XXX assumed P0 case */
	else
		olen = P1PAGES - u.u_pcb.pcb_p1lr;
	if ((change = olen - nlen) <= 0)
		return;
	if (region == 0)
		pte = u.u_pcb.pcb_p0br + u.u_pcb.pcb_p0lr;
	else
		pte = u.u_pcb.pcb_p1br + u.u_pcb.pcb_p1lr - change;
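	/*
	 * Assumed completion: invalidate the PTEs for the pages that are
	 * no longer valid and flush the TLB; the exact loop was not
	 * recovered.
	 */
	do {
		*(u_int *)pte++ = 0;
	} while (--change);
	tlbflush();
}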
/*
 * Map `size' bytes of physical memory starting at `paddr' into
 * kernel VA space using PTEs starting at `pte'.  Read/write and
 * cache-inhibit status are specified by `prot'.
 */
/* XXX: the paddr/size/prot declarations and the `page' local are
   reconstructed; the pte declaration and the statements below were
   recovered. */
physaccess(pte, paddr, size, prot)
	register struct pte *pte;
	caddr_t paddr;
	register int size;
	int prot;
{
	register u_int page;

	page = (u_int)paddr & PG_FRAME;
	for (size = btoc(size); size; size--) {
		*(int *)pte = PG_V | prot | page;
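		/*
		 * Assumed completion: step to the next physical page and
		 * PTE, then flush the TLB once the range is mapped.
		 */
		page += NBPG;
		pte++;
	}
	tlbflush();
}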
/*
 * Move pages from one kernel virtual address to another.
 * Both addresses are assumed to reside in the Sysmap,
 * and size must be a multiple of CLSIZE.
 */
/* XXX: the function header and the `size' parameter are reconstructed;
   only the declarations and the Sysmap lookups below were recovered. */
pagemove(from, to, size)
	register caddr_t from, to;
	int size;
{
	register struct pte *fpte, *tpte;

	fpte = &Sysmap[btop(from-0xfe000000)];
	tpte = &Sysmap[btop(to-0xfe000000)];
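	/*
	 * Assumed completion: copy each PTE to the destination, invalidate
	 * the source mapping, and flush the TLB; the loop itself was not
	 * recovered.
	 */
	while (size > 0) {
		*tpte++ = *fpte;
		*(int *)fpte++ = 0;
		from += NBPG;
		to += NBPG;
		size -= NBPG;
	}
	tlbflush();
}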
/*
 * The probe[rw] routines should probably be redone in assembler
 */
/* XXX: the prober()/probew() wrappers are reconstructed; only the two
   range checks and the final protection test were recovered. */
prober(addr)
	register u_int addr;
{
	register int page;
	register struct proc *p;

	p = u.u_procp;
	page = btop(addr);
	if (page < dptov(p, p->p_dsize) || page > sptov(p, p->p_ssize))
		return (1);
	return (0);
}

probew(addr)
	register u_int addr;
{
	register int page;
	register struct proc *p;

	p = u.u_procp;
	page = btop(addr);
	if (page < dptov(p, p->p_dsize) || page > sptov(p, p->p_ssize))
		return((*(int *)vtopte(p, page) & PG_PROT) == PG_UW);
	return (0);
}
/*
 * NB: assumes a physically contiguous kernel page table
 *     (makes life a LOT simpler).
 */
/* XXX: the kernacc() header, the ix/cnt declarations and the `cnt -= ix'
   adjustments are reconstructed; the statements themselves were
   recovered. */
kernacc(addr, count, rw)
	register u_int addr;
	int count, rw;
{
	register struct pde *pde;
	register struct pte *pte;
	register int ix, cnt;

	pde = (struct pde *)((u_int)u.u_procp->p_p0br
	    + u.u_procp->p_szpt * NBPG);
	ix = (addr & PD_MASK) >> PD_SHIFT;
	cnt = ((addr + count + (1 << PD_SHIFT) - 1) & PD_MASK) >> PD_SHIFT;
	cnt -= ix;
	for (pde += ix; cnt; cnt--, pde++)
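		/*
		 * Assumed loop body: fail if any page directory entry in
		 * the range is invalid (not recovered verbatim).
		 */
		if (pde->pd_v == 0)
			return (0);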
	ix = btop(addr-0xfe000000);
	cnt = btop(addr-0xfe000000+count+NBPG-1);
	cnt -= ix;		/* XXX assumed: not recovered */
	for (pte = &Sysmap[ix]; cnt; cnt--, pte++)
		if (pte->pg_v == 0 /*|| (rw == B_WRITE && pte->pg_prot == 1)*/)
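			/* assumed: an invalid PTE fails the access check */
			return (0);
	return (1);
}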
/* XXX: the useracc() wrapper around the recovered statements is a
   reconstruction; the page-by-page probing loop is an assumption. */
useracc(addr, count, rw)
	register caddr_t addr;
	unsigned count;
	int rw;
{
	register int (*func)();
	register u_int addr2;
	extern int prober(), probew();

	addr2 = (u_int)addr;
	func = (rw == B_READ) ? prober : probew;
	do {
		if ((*func)(addr2) == 0)
			return (0);
		addr2 = (addr2 + NBPG) & ~PGOFSET;
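		/* assumed loop bound: stop once the whole range is probed */
	} while (addr2 < (u_int)addr + count);
	return (1);
}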
/*
 * Convert kernel VA to physical address
 */
/* XXX: the kvtop() header, the `pf' declaration and the zero-frame test
   are reconstructed; the lookup, panic and return were recovered. */
kvtop(addr)
	register u_int addr;
{
	register int pf;

	pf = Sysmap[btop(addr-0xfe000000)].pg_pfnum;
	if (pf == 0)
		panic("kvtop: zero page frame");
	return((u_int)ptob(pf) + (addr & PGOFSET));
}
/* XXX: the vtopde() header is reconstructed (the name is confirmed by the
   extern declaration further below); the body was recovered. */
struct pde *
vtopde(p, va)
	register struct proc *p;
	register u_int va;
{
	register struct pde *pde;

	pde = (struct pde *)((u_int)p->p_p0br + p->p_szpt * NBPG);
	return(pde + ((va & PD_MASK) >> PD_SHIFT));
}
/* XXX: the routine wrapping this return (it yields the physical address
   of the process page directory, presumably for loading into %cr3) was
   not recovered; the name below is a hypothetical placeholder. */
pdtophys(p)
	register struct proc *p;
{
	return(ctob(Usrptmap[btokmx(p->p_p0br+p->p_szpt*NPTEPG)].pg_pfnum));
	/*return((int)Usrptmap[btokmx(p->p_p0br) + p->p_szpt].pg_pfnum);*/
}
/*
 * Initialize page directory table to reflect PTEs in Usrptmap.
 * Page directory table address is given by Usrptmap index of p_szpt.
 * [used by vgetpt for kernel mode entries, and ptexpand for user mode entries]
 */
/*
 * XXX reconstruction: the function name/header, the i/k/sz declarations,
 * the repositioning of `pde' between sections and the loop closings are
 * assumptions; the individual entries being built were recovered.
 */
initpdt(p)
	register struct proc *p;
{
	register int i, k, sz;
	register struct pde *pde, *toppde;
	extern struct pde *vtopde();

	/* map kernel */
	pde = vtopde(p, &Sysbase);
	for (i = 0; i < 5; i++, pde++) {
		*(int *)pde = PG_UW | PG_V;
		pde->pd_pfnum = btoc((unsigned) Sysmap & ~0xfe000000)+i;
	}

	/* map u dot (XXX: the statement locating this pde was not recovered) */
	*(int *)pde = PG_UW | PG_V;
	pde->pd_pfnum = Usrptmap[btokmx(p->p_addr)].pg_pfnum;

	/* otherwise, fill in user map */
	k = btokmx(p->p_p0br);		/* XXX assumed starting index */
	pde = vtopde(p, 0);		/* XXX assumed repositioning */
	sz = ctopt(p->p_tsize + p->p_dsize);
	for (i = 0; i < sz; i++, pde++) {
		*(int *)pde = PG_UW | PG_V;
		pde->pd_pfnum = Usrptmap[k++].pg_pfnum;
	}
	/*
	 * Bogus!  The kernelmap may map unused PT pages
	 * (since we don't shrink PTs) so we need to skip over
	 * those PDEs.  We should really free the unused PT pages.
	 */
	sz += ctopt(p->p_ssize+UPAGES);
	if (sz < p->p_szpt)		/* XXX assumed skip of unused PT pages */
		k += p->p_szpt - sz;
	/* invalidate the hole between data and stack */
	sz = NPTEPG - ctopt(p->p_ssize + UPAGES + btoc(&Sysbase));
	for ( ; i < sz; i++, pde++)
		*(int *)pde = 0;	/* XXX assumed loop body */
	/* map stack and u-area page tables */
	sz = NPTEPG - ctopt(UPAGES + btoc(&Sysbase));
	for ( ; i < sz; i++, pde++) {
		*(int *)pde = PG_UW | PG_V;
		pde->pd_pfnum = Usrptmap[k++].pg_pfnum;
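	}
	/* assumed completion: end of the final loop and of the routine;
	   any return value was not recovered */
}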
/*
 * Allocate wired-down, non-paged, cache-inhibited pages in kernel
 * virtual memory and clear them
 */
/*
 * XXX reconstruction: the original function name was not recovered
 * (`cialloc' below is a hypothetical placeholder), and the header, the
 * failure path, the PTE lookup and the per-page loop are assumptions;
 * the allocation calls and the per-page statements were recovered.
 */
caddr_t
cialloc(npg)
	register int npg;
{
	register struct pte *pte;
	register int a;
	extern struct map *kernelmap;

	a = rmalloc(kernelmap, (long)npg);
	if (a == 0)
		return ((caddr_t)0);	/* XXX assumed failure path */
	pte = &Usrptmap[a];		/* XXX assumed: PTEs mapping the region */
	(void) vmemall(pte, npg, &proc[0], CSYS);
	while (npg--) {
		*(int *)pte |= (PG_V|PG_KW|PG_CI);
		clearseg((unsigned)pte->pg_pfnum);
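		/* assumed completion: advance to the next page's PTE */
		pte++;
	}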
	return ((caddr_t)kmxtob(a));
}
extern struct pte Usriomap[];
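/*
 * Assumed declarations: the resource map and base address used by the io
 * mapping routines below were not recovered; the types are guesses.
 */
struct map *useriomap;
caddr_t usrio;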
/*
 * Map an IO request into kernel virtual address space.  Requests fall into
 * one of five categories:
 *
 *	B_PHYS|B_UAREA:	User u-area swap.
 *			Address is relative to start of u-area (p_addr).
 *	B_PHYS|B_PAGET:	User page table swap.
 *			Address is a kernel VA in usrpt (Usrptmap).
 *	B_PHYS|B_DIRTY:	Dirty page push.
 *			Address is a VA in proc2's address space.
 *	B_PHYS|B_PGIN:	Kernel pagein of user pages.
 *			Address is VA in user's address space.
 *	B_PHYS:		User "raw" IO request.
 *			Address is VA in user's address space.
 *
 * All requests are (re)mapped into kernel VA space via the useriomap
 * (a name with only slightly more meaning than "kernelmap")
 */
/*
 * XXX reconstruction: the vmapbuf() header, the locals other than
 * flags/pte/iopte, the panic on a non-physical buffer, the B_UAREA/else
 * branches and the mapin loop header are assumptions; the remaining
 * statements were recovered.
 */
vmapbuf(bp)
	register struct buf *bp;
{
	register int npf;
	register caddr_t addr;
	register long flags = bp->b_flags;
	register struct pte *pte, *iopte;
	struct proc *p;
	int off, a;

	if ((flags & B_PHYS) == 0)
		panic("vmapbuf");
	/*
	 * Find PTEs for the area to be mapped
	 */
	p = flags&B_DIRTY ? &proc[2] : bp->b_proc;
	addr = bp->b_un.b_addr;
	if (flags & B_UAREA)
		pte = &p->p_addr[btop(addr)];
	else if (flags & B_PAGET)
		pte = &Usrptmap[btokmx((struct pte *)addr)];
	else
		pte = vtopte(p, btop(addr));
	/*
	 * Allocate some kernel PTEs and load them
	 */
	off = (int)addr & PGOFSET;
	npf = btoc(bp->b_bcount + off);
	while ((a = rmalloc(useriomap, npf)) == 0)
		sleep((caddr_t)useriomap, PSWP);
	iopte = &Usriomap[a];
	addr = bp->b_un.b_addr = (caddr_t)(usrio + (a << PGSHIFT)) + off;
	while (npf--) {
		mapin(iopte, a, pte->pg_pfnum, PG_V);
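		/*
		 * Assumed completion: advance to the next source PTE and
		 * io map slot until the whole request is mapped.
		 */
		iopte++, pte++;
		a++;
	}
}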
/*
 * Free the io map PTEs associated with this IO operation.
 * We also invalidate the TLB entries.
 */
/* XXX: the vunmapbuf() header, the a/npf declarations and the panic on a
   non-physical buffer are reconstructed; the statements were recovered. */
vunmapbuf(bp)
	register struct buf *bp;
{
	register int a, npf;
	register caddr_t addr = bp->b_un.b_addr;
	register struct pte *pte;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");
	a = (int)(addr - usrio) >> PGSHIFT;
	npf = btoc(bp->b_bcount + ((int)addr & PGOFSET));
	rmfree(useriomap, npf, a);
	wakeup((caddr_t)useriomap);
	/*
	 * If we just completed a dirty page push, we must reconstruct
	 * the original b_addr since cleanup() needs it.
	 */
	if (bp->b_flags & B_DIRTY) {
		a = ((bp - swbuf) * CLSIZE) * KLMAX;
		bp->b_un.b_addr = (caddr_t)ctob(dptov(&proc[2], a));
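	}
	/*
	 * Assumed completion: the comment above says the TLB entries are
	 * invalidated as well; a flush presumably ended the routine but
	 * was not recovered.
	 */
	tlbflush();
}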