/*
 * Copyright (c) 1982, 1986 Regents of the University of California.
 * All rights reserved.  The Berkeley software License Agreement
 * specifies the terms and conditions for redistribution.
 *
 *	@(#)vm_swap.c	7.1 (Berkeley) 6/5/86
 */
#include "../machine/pte.h"
/*
 * NOTE(review): incomplete fragment — the enclosing function header
 * (this looks like a swap-in path: page tables and the u. area are
 * reallocated and read back from swap) and a number of intervening
 * lines were lost in extraction.  Only comment delimiters are repaired
 * here; the code tokens are untouched and will not compile as-is.
 */
register struct text
*xp
;
/*
 * Recompute the process page-table size: stack + data + text clicks
 * plus UPAGES for the u. area, rounded via clrnd(ctopt(...)).
 */
p
->p_szpt
= clrnd(ctopt(p
->p_ssize
+p
->p_dsize
+p
->p_tsize
+UPAGES
));
/*
 * Allocate page tables and a u. area from the memall pool.
 * NOTE(review): the failure-handling statements for these two
 * zero-return checks were dropped by the extraction.
 */
if (vgetpt(p
, memall
) == 0)
if (vgetu(p
, memall
, Swapmap
, &swaputl
, (struct user
*)0) == 0) {
/* Read the data/stack page tables back in from swap. */
swdspt(p
, &swaputl
, B_READ
);
/* Make sure swdspt didn't smash u. pte's */
for (i
= 0; i
< UPAGES
; i
++) {
if (Swapmap
[i
].pg_pfnum
!= p
->p_addr
[i
].pg_pfnum
)
/*
 * Point the saved pcb at u_ssave — presumably so the context-switch
 * code finds the saved state after the swap-in; confirm against resume().
 */
swaputl
.u_pcb
.pcb_sswap
= (int *)&u
.u_ssave
;
int xswapwant
, xswaplock
;
* ds and ss are the old data size and the stack size
* of the process, and are supplied during page table
register struct pte
*map
;
register struct user
*utl
;
if ((xswaplock
& 2) == 0) {
sleep((caddr_t
)map
, PSWP
);
if (vgetswu(p
, utl
) == 0) {
if (p
->p_textp
->x_ccount
== 1)
p
->p_textp
->x_swrss
= p
->p_textp
->x_rssize
;
p
->p_swrss
= p
->p_rssize
;
vsswap(p
, dptopte(p
, 0), CDATA
, 0, (int)ds
, &utl
->u_dmap
);
vsswap(p
, sptopte(p
, CLSIZE
-1), CSTACK
, 0, (int)ss
, &utl
->u_smap
);
* If freeing the user structure and kernel stack
* for the current process, have to run a bit longer
* using the pages which are about to be freed...
* vrelu will then block memory allocation by raising ipl.
if ((p
->p_flag
& SLOAD
) && (p
->p_stat
!= SRUN
|| p
!= u
.u_procp
))
wakeup((caddr_t
)&runout
);
* Swap the data and stack page tables in or out.
* Only hard thing is swapping out when new pt size is different than old.
* If we are growing new pt pages, then we must spread pages with 2 swaps.
* If we are shrinking pt pages, then we must merge stack pte's into last
* data page so as not to lose them (and also do two swaps).
register struct user
*utl
;
register int szpt
, tsz
, ssz
;
register struct pte
*pte
;
szpt
= clrnd(ctopt(p
->p_tsize
+p
->p_dsize
+p
->p_ssize
+UPAGES
));
tsz
= p
->p_tsize
/ NPTEPG
;
(p
->p_szpt
- tsz
) * NBPG
- UPAGES
* sizeof (struct pte
));
ssz
= clrnd(ctopt(utl
->u_ossize
+UPAGES
));
if (szpt
< p
->p_szpt
&& utl
->u_odsize
&& (utl
->u_ossize
+UPAGES
)) {
* Page tables shrinking... see if last text+data and
* last stack page must be merged... if so, copy
* stack pte's from last stack page to end of last
* data page, and decrease size of stack pt to be swapped.
tdlast
= (p
->p_tsize
+ utl
->u_odsize
) % (NPTEPG
* CLSIZE
);
slast
= (utl
->u_ossize
+ UPAGES
) % (NPTEPG
* CLSIZE
);
if (tdlast
&& slast
&& tdlast
+ slast
<= (NPTEPG
* CLSIZE
)) {
tdsz
= clrnd(ctopt(p
->p_tsize
+ utl
->u_odsize
));
bcopy((caddr_t
)sptopte(p
, utl
->u_ossize
- 1),
(caddr_t
)&p
->p_p0br
[tdsz
* NPTEPG
- slast
],
(unsigned)(slast
* sizeof (struct pte
)));
swpt(rdwri
, p
, szpt
- ssz
- tsz
, p
->p_szpt
- ssz
, ssz
* NBPG
);
(int)(clrnd(ctopt(p
->p_tsize
+ utl
->u_odsize
)) - tsz
) * NBPG
);
for (i
= 0; i
< utl
->u_odsize
; i
++) {
if (pte
->pg_v
|| pte
->pg_fod
== 0 && (pte
->pg_pfnum
||pte
->pg_m
))
for (i
= 0; i
< utl
->u_ossize
; i
++) {
if (pte
->pg_v
|| pte
->pg_fod
== 0 && (pte
->pg_pfnum
||pte
->pg_m
))
* Swap a section of the page tables.
* Errors are handled at a lower level (by doing a panic).
swpt(rdwri
, p
, doff
, a
, n
)
(void)swap(p
, p
->p_swaddr
+ ctod(UPAGES
) + ctod(doff
),
(caddr_t
)&p
->p_p0br
[a
* NPTEPG
], n
, rdwri
, B_PAGET
, swapdev
, 0);