BSD 4_3_Reno release
[unix-history] / usr / src / sys / kern / kern_mman.c
1/*
2 * Copyright (c) 1982, 1986, 1989 Regents of the University of California.
3 * All rights reserved.
4 *
5 * Redistribution is only permitted until one year after the first shipment
6 * of 4.4BSD by the Regents. Otherwise, redistribution and use in source and
7 * binary forms are permitted provided that: (1) source distributions retain
8 * this entire copyright notice and comment, and (2) distributions including
9 * binaries display the following acknowledgement: ``This product includes
10 * software developed by the University of California, Berkeley and its
11 * contributors'' in the documentation or other materials provided with the
12 * distribution and in all advertising materials mentioning features or use
13 * of this software. Neither the name of the University nor the names of
14 * its contributors may be used to endorse or promote products derived from
15 * this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
17 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
18 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
19 *
20 * @(#)kern_mman.c 7.18 (Berkeley) 6/30/90
21 */
22
23#include "param.h"
24#include "systm.h"
25#include "map.h"
26#include "user.h"
27#include "proc.h"
28#include "buf.h"
29#include "vnode.h"
30#include "specdev.h"
31#include "seg.h"
32#include "acct.h"
33#include "wait.h"
34#include "vm.h"
35#include "text.h"
36#include "file.h"
37#include "vadvise.h"
38#include "cmap.h"
39#include "trace.h"
40#include "mman.h"
41#include "mapmem.h"
42#include "malloc.h"
43#include "conf.h"
44
45#include "machine/cpu.h"
46#include "machine/reg.h"
47#include "machine/psl.h"
48#include "machine/pte.h"
49#include "machine/mtpr.h"
50
51/*
52 * The MMAP code here is temporary; it provides support
53 * only for mmapping devices such as frame buffers.
54 * All to be different next time...
55 */
56#ifndef MAPMEM
57#undef MMAP /* XXX */
58#endif
59
60#ifdef MMAP
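/*
 * Ops vector for regions created by smmap().  All of the object-specific
 * hooks (invoked below through mm_ops as mm_vfork, mm_fork, mm_exec and
 * mm_exit) are null, so plain device mappings get no special treatment
 * at vfork, fork, exec or exit time.
 */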
61struct mapmemops mmapops = {
62 (int (*)())0, (int (*)())0, (int (*)())0, (int (*)())0
63};
64#endif
65
66/* ARGSUSED */
67sbrk(p, uap, retval)
68 struct proc *p;
69 struct args {
70 int incr;
71 } *uap;
72 int *retval;
73{
74
75 /* Not yet implemented */
76 return (EOPNOTSUPP);
77}
78
79/* ARGSUSED */
80sstk(p, uap, retval)
81 struct proc *p;
82 struct args {
83 int incr;
84 } *uap;
85 int *retval;
86{
87
88 /* Not yet implemented */
89 return (EOPNOTSUPP);
90}
91
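/*
 * Return the software page size visible to user programs:
 * NBPG bytes per hardware page times CLSIZE hardware pages per click.
 */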
92/* ARGSUSED */
93getpagesize(p, uap, retval)
94 struct proc *p;
95 struct args *uap;
96 int *retval;
97{
98
99 *retval = NBPG * CLSIZE;
100 return (0);
101}
102
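/*
 * Old-style mmap: map a character device (one with a d_mmap entry) into
 * the user address space.  addr, pos and len must be click aligned, len
 * positive, the open mode must permit the requested protection, and only
 * MAP_SHARED is accepted.  A mapmem descriptor is allocated with mmalloc()
 * and the ptes are filled in directly with page-frame numbers supplied by
 * the driver's d_mmap routine.  Illustrative user-level use (hypothetical
 * device name):
 *
 *	fd = open("/dev/fb", O_RDWR);
 *	mmap(addr, len, PROT_READ|PROT_WRITE, MAP_SHARED, fd, (off_t)0);
 */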
103/* ARGSUSED */
104smmap(p, uap, retval)
105 register struct proc *p;
106 register struct args {
107 caddr_t addr;
108 int len;
109 int prot;
110 int share;
111 int fd;
112 off_t pos;
113 } *uap;
114 int *retval;
115{
116#ifndef MMAP
117 return (EOPNOTSUPP);
118#else
119 register struct file *fp;
120 struct mapmem *mp;
121 struct vnode *vp;
122 register struct pte *pte;
123 struct pte *dpte;
124 register int off;
125 int error, fv, lv, pm, (*mapfun)();
126 dev_t dev;
127
128 if (error = getvnode(u.u_ofile, uap->fd, &fp))
129 return (error);
130 vp = (struct vnode *)fp->f_data;
131 if (vp->v_type != VCHR)
132 return (EINVAL);
133 dev = vp->v_rdev;
134 mapfun = cdevsw[major(dev)].d_mmap;
135 if (mapfun == NULL)
136 return (EINVAL);
137 if (((int)uap->addr & CLOFSET) || (uap->pos & CLOFSET) ||
138 uap->len <= 0 || (uap->len & CLOFSET))
139 return (EINVAL);
140 if ((uap->prot & PROT_WRITE) && (fp->f_flag&FWRITE) == 0)
141 return (EINVAL);
142 if ((uap->prot & PROT_READ) && (fp->f_flag&FREAD) == 0)
143 return (EINVAL);
144 if (uap->share != MAP_SHARED)
145 return (EINVAL);
146 for (off = 0; off < uap->len; off += NBPG)
147 if ((*mapfun)(dev, uap->pos+off, uap->prot) == -1)
148 return (EINVAL); /* Needs translation */
149 /*
150 * Allocate a descriptor for this region and expand page
151 * table to accommodate.
152 */
153 if (uap->prot & PROT_WRITE) {
154 pm = PG_UW|PG_FOD|PG_V;
155 off = MM_RW;
156 } else {
157 pm = PG_URKR|PG_FOD|PG_V;
158 off = MM_RO;
159 }
160#if defined(hp300)
161 pm |= PG_CI;
162 off |= MM_CI;
163#endif
164 error = mmalloc(p, uap->fd, &uap->addr, uap->len, off, &mmapops, &mp);
165 if (error)
166 return (error);
167 /*
168 * Now map it in.
169 * Can't use mmmapin() because of args to map function.
170 */
171 fv = btop(uap->addr);
172 pte = vtopte(p, fv);
173 dpte = dptopte(p, u.u_dsize);
174 for (off = 0; off < uap->len; off += NBPG) {
175 if ((off&CLOFSET) == 0 && pte < dpte)
176 p->p_rssize -= vmemfree(pte, CLSIZE);
177 *(int *)pte = pm;
178 pte->pg_pfnum = (*mapfun)(dev, uap->pos+off, uap->prot);
179 pte++;
180 }
181 newptes(vtopte(p, fv), fv, btoc(uap->len));
182 u.u_pofile[uap->fd] |= UF_MAPPED;
183 return (0);
184#endif /* MMAP */
185}
186
187/* ARGSUSED */
188msync(p, uap, retval)
189 struct proc *p;
190 struct args {
191 char *addr;
192 int len;
193 } *uap;
194 int *retval;
195{
196
197 /* Not yet implemented */
198 return (EOPNOTSUPP);
199}
200
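/*
 * Remove a mapping established by smmap().  The given range must lie
 * entirely within a single mapped region; that region is unmapped and
 * its descriptor freed, and UF_MAPPED is cleared on the underlying file
 * descriptor only when no other region still refers to it.
 */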
201/* ARGSUSED */
202munmap(p, uap, retval)
203 register struct proc *p;
204 register struct args {
205 caddr_t addr;
206 int len;
207 } *uap;
208 int *retval;
209{
210#ifndef MMAP
211 return (EOPNOTSUPP);
212#else
213 register struct mapmem *mp;
214 register int fd;
215 caddr_t eaddr;
216 int error;
217
218 if (((int)uap->addr & CLOFSET) ||
219 uap->len <= 0 || (uap->len & CLOFSET))
220 return (EINVAL);
221 /*
222 * Locate region mapping this range. If found, unmap it.
223 */
224 eaddr = uap->addr + uap->len - 1;
225 for (mp = u.u_mmap; mp; mp = mp->mm_next)
226 if (mp->mm_ops == &mmapops &&
227 uap->addr >= mp->mm_uva && eaddr < mp->mm_uva+mp->mm_size)
228 break;
229 if (mp == MMNIL)
230 return (EINVAL);
231 fd = mp->mm_id;
232 mmmapout(p, mp);
233	error = mmfree(p, mp);
234 /*
235 * If no other range has this descriptor mapped, mark it as unmapped.
236 */
237 for (mp = u.u_mmap; mp; mp = mp->mm_next)
238 if (mp->mm_id == fd)
239 break;
240 if (mp == MMNIL)
241 u.u_pofile[fd] &= ~UF_MAPPED;
242 return (error);
243#endif /* MMAP */
244}
245
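/*
 * Called when a mapped file descriptor is closed: tear down every region
 * attached to that descriptor and clear its UF_MAPPED flag.  In a vfork
 * child nothing is done, since the address space (and its mappings) still
 * belongs to the parent.
 */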
246munmapfd(fd)
247 int fd;
248{
249 int error = 0;
250#ifdef MMAP
251 struct proc *p = u.u_procp; /* XXX */
252 register struct mapmem *mp, **mpp;
253
254 if (p->p_flag & SVFORK)
255 return (0);
256 mpp = &u.u_mmap;
257 for (mp = *mpp; mp; mp = *mpp) {
258 if (mp->mm_ops == &mmapops && mp->mm_id == fd) {
259 mmmapout(p, mp);
260			error = mmfree(p, mp);
261 } else
262 mpp = &mp->mm_next;
263 }
264#endif
265 u.u_pofile[fd] &= ~UF_MAPPED;
266 return (error);
267}
268
269/* ARGSUSED */
270mprotect(p, uap, retval)
271 struct proc *p;
272 struct args {
273 char *addr;
274 int len;
275 int prot;
276 } *uap;
277 int *retval;
278{
279
280 /* Not yet implemented */
281 return (EOPNOTSUPP);
282}
283
284/* ARGSUSED */
285madvise(p, uap, retval)
286 struct proc *p;
287 struct args {
288 char *addr;
289 int len;
290 int behav;
291 } *uap;
292 int *retval;
293{
294
295 /* Not yet implemented */
296 return (EOPNOTSUPP);
297}
298
299/* ARGSUSED */
300mincore(p, uap, retval)
301 struct proc *p;
302 struct args {
303 char *addr;
304 int len;
305 char *vec;
306 } *uap;
307 int *retval;
308{
309
310 /* Not yet implemented */
311 return (EOPNOTSUPP);
312}
313
314/* BEGIN DEFUNCT */
315/* ARGSUSED */
316obreak(p, uap, retval)
317 struct proc *p;
318 struct args {
319 char *nsiz;
320 } *uap;
321 int *retval;
322{
323 register segsz_t n, d, ds;
324 int error;
325
326 /*
327 * set n to new data size
328 */
329 n = btoc(uap->nsiz) - dptov(p, 0);
330 if (n < 0)
331 n = 0;
332 /*
333 * since we can't pass a negative argument for the difference to chksize,
334 * if d is negative, make ds equal to the final value and clear d.
335 * keep the real difference in n for later use in expand.
336 */
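	/*
	 * For example (illustrative, cluster-rounded figures): shrinking a
	 * 100 click data segment to 80 clicks gives n = d = -20; ds becomes
	 * 80 and d becomes 0, so chksize() and swpexpand() see the final
	 * size while expand() below still receives the signed change in n.
	 */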
337 ds = u.u_dsize;
338 if ((n = d = clrnd(n - u.u_dsize)) < 0) {
339 ds += d;
340 d = 0;
341 }
342 if (ctob(ds + d) > u.u_rlimit[RLIMIT_DATA].rlim_cur)
343 return (ENOMEM);
344 if (error =
345 chksize((u_int)u.u_tsize, (u_int)ds, (u_int)d, (u_int)u.u_ssize))
346 return (error);
347#ifdef MAPMEM
348 /*
349 * If change would conflict with any mapped memory segment
350 * return ENOMEM.
351 */
352 if (u.u_mmap && n != 0) {
353 caddr_t low, high;
354
355 low = (caddr_t) ctob(dptov(p, ds));
356 high = low + ctob((n < 0) ? -n : n);
357 if (mmclash(u.u_mmap, low, high))
358 return (ENOMEM);
359 }
360#endif
361 if (error = swpexpand(ds + d, u.u_ssize, &u.u_dmap, &u.u_smap))
362 return (error);
363 if (p->p_mmsize && (p->p_mmsize -= n) < 0)
364 p->p_mmsize = 0;
365 expand((int)n, 0);
366 return (0);
367}
368
369/*
370 * Macros for clearing a page's reference bits.
371 */
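/*
 * With hardware reference bits (REFBIT) the pg_u bit is cleared; otherwise
 * the valid bit itself is cleared so that the next touch faults and serves
 * as the "reference".  Either way the change is distributed to the other
 * ptes of the cluster (distcl) and, for shared text pages, to the other
 * processes using the text (distpte).
 */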
372#ifdef REFBIT
373#if !defined(tahoe)
374#define uncache(pte) /* XXX */
375#endif
376
377#define CLRREF(pte, c, p, i) { \
378 if (!isatpte(p, pte)) \
379 uncache(pte); \
380 if (pte->pg_u) { \
381 c = &cmap[pgtocm(pte->pg_pfnum)]; \
382 if (c->c_lock) \
383 continue; \
384 pte->pg_u = 0; \
385 if (anycl(pte, pg_m)) \
386 pte->pg_m = 1; \
387 distcl(pte); \
388 if (isatpte(p, pte)) \
389 distpte(p->p_textp, i, pte); \
390 } \
391}
392#else
393#define CLRREF(pte, c, p, i) { \
394 c = &cmap[pgtocm(pte->pg_pfnum)]; \
395 if (c->c_lock) \
396 continue; \
397 pte->pg_v = 0; \
398 if (anycl(pte, pg_m)) \
399 pte->pg_m = 1; \
400 distcl(pte); \
401 if (isatpte(p, pte)) \
402 distpte(p->p_textp, i, pte); \
403}
404#endif
405
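/*
 * Old vadvise(2): record the VA_ANOM (random) or VA_SEQL (sequential)
 * paging hint in p_flag.  When random behaviour is being turned off, and
 * for VA_FLUSH, the reference/valid bits of resident pages are cleared so
 * the pages become candidates for reclamation; VA_FLUSH does this for the
 * stack and text segments as well as the data segment, then the
 * translation buffer is flushed.
 */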
406/* ARGSUSED */
407ovadvise(rp, uap, retval)
408 register struct proc *rp;
409 struct args {
410 int anom;
411 } *uap;
412 int *retval;
413{
414 int oanom = rp->p_flag & SUANOM;
415 register struct pte *pte;
416 register struct cmap *c;
417 register unsigned i;
418
419 trace(TR_VADVISE, uap->anom, rp->p_pid);
420 rp->p_flag &= ~(SSEQL|SUANOM);
421 switch (uap->anom) {
422
423 case VA_ANOM:
424 rp->p_flag |= SUANOM;
425 break;
426
427 case VA_SEQL:
428 rp->p_flag |= SSEQL;
429 break;
430 }
431 if ((oanom && (rp->p_flag & SUANOM) == 0) || uap->anom == VA_FLUSH) {
432 for (i = 0; i < rp->p_dsize; i += CLSIZE) {
433 pte = dptopte(rp, i);
434#ifdef MAPMEM
435 /* don't do mmap pages */
436 if (pte->pg_v && !pte->pg_fod)
437#else
438 if (pte->pg_v)
439#endif
440 CLRREF(pte, c, rp, i);
441 }
442 }
443 if (uap->anom == VA_FLUSH) { /* invalidate all pages */
444 for (i = 1; i < rp->p_ssize; i += CLSIZE) {
445 pte = sptopte(rp, i);
446 if (pte->pg_v)
447 CLRREF(pte, c, rp, i);
448 }
449 for (i = 0; i < rp->p_tsize; i += CLSIZE) {
450 pte = tptopte(rp, i);
451 if (pte->pg_v)
452 CLRREF(pte, c, rp, i);
453 }
454 }
455#if defined(vax) || defined(tahoe)
456 mtpr(TBIA, 0);
457#endif
458#if defined(hp300)
459 TBIAU();
460#endif
461#if defined(i386)
462 tlbflush();
463#endif
464 return (0);
465}
466/* END DEFUNCT */
467
468/*
469 * Grow the stack to include the SP; true return if successful.
470 * Clients do not care about the cause of the error.
471 */
472grow(sp)
473 unsigned sp;
474{
475 int si, error;
476
477 if (sp >= USRSTACK-ctob(u.u_ssize))
478 return (0);
479 si = clrnd(btoc((USRSTACK-sp)) - u.u_ssize + SINCR);
480 if (ctob(si) > u.u_rlimit[RLIMIT_STACK].rlim_cur)
481 return (0);
482 if (error = chksize((u_int)u.u_tsize, (u_int)u.u_dsize, (u_int)0,
483 (u_int)u.u_ssize+si))
484 return (0);
485 if (error = swpexpand(u.u_dsize, u.u_ssize + si, &u.u_dmap, &u.u_smap))
486 return (0);
487 expand(si, 1);
488 return (1);
489}
490
491#ifdef MAPMEM
492
493/*
494 * Called from vpassvm() after full context has been passed from fup to tup.
495 * Always called in the context of the parent. NOTE: routines should NOT
496 * destroy regions.
497 */
498mmvfork(fup, tup)
499 struct user *fup, *tup;
500{
501 register struct mapmem *mp;
502
503 tup->u_mmap = fup->u_mmap;
504 fup->u_mmap = (struct mapmem *) 0;
505 for (mp = tup->u_mmap; mp; mp = mp->mm_next)
506 if (mp->mm_ops->mm_vfork)
507 (*mp->mm_ops->mm_vfork)(mp, fup, tup);
508}
509
510/*
511 * Called from procdup() for both parent and child. If in parent
512 * we need to duplicate mapped memory regions. In both parent and
513 * child, we call object specific routine.
514 */
515mmfork(pup, cup)
516 struct user *pup, *cup;
517{
518 register struct mapmem *mp, **mpp;
519 int error = 0;
520
521 if (pup) {
522 mmdup(pup, cup);
523 for (mp = pup->u_mmap; mp; mp = mp->mm_next)
524 if (mp->mm_ops->mm_fork)
525 (*mp->mm_ops->mm_fork)(mp, 0);
526 } else {
527 mpp = &u.u_mmap;
528 for (mp = *mpp; mp; mp = *mpp) {
529 if (mp->mm_ops->mm_fork)
530 (*mp->mm_ops->mm_fork)(mp, 1);
531 if (*mpp == mp)
532 mpp = &mp->mm_next;
533 }
534 error = mmexpand(u.u_procp);
535 }
536 return (error);
537}
538
539/*
540 * It's not clear that having a separate exec routine is useful since
541 * exec frees the address space immediately afterwards. We probably
542 * need a post-exec hook to reestablish any mappings that persist
543 * across execs.
544 */
545mmexec(p)
546 struct proc *p;
547{
548 register struct mapmem *mp, **mpp;
549 int error1, error = 0;
550
551 mpp = &u.u_mmap;
552 for (mp = *mpp; mp; mp = *mpp) {
553 if (mp->mm_ops->mm_exec)
554 error = (*mp->mm_ops->mm_exec)(mp);
555 if (*mpp == mp) {
556 *mpp = mp->mm_next;
557 MMFREE(mp);
558 }
559 }
560 if (error1 = mmexpand(p))
561 return (error1);
562 if (p->p_mmsize)
563 panic("mmexec");
564 return (error);
565}
566
567/*
568 * Called from exit just before releasing address space.
569 * We always reclaim resources regardless of what the object routine does.
570 */
571mmexit(p)
572 struct proc *p;
573{
574 register struct mapmem *mp, **mpp;
575 int error1, error = 0;
576
577 mpp = &u.u_mmap;
578 for (mp = *mpp; mp; mp = *mpp) {
579 if (mp->mm_ops->mm_exit)
580 error = (*mp->mm_ops->mm_exit)(mp);
581 if (*mpp == mp) {
582 *mpp = mp->mm_next;
583 MMFREE(mp);
584 }
585 }
586 if (error1 = mmexpand(p))
587 return (error1);
588 if (p->p_mmsize)
589 panic("mmexit");
590 return (error);
591}
592
593/*
594 * Called from core just before dumping process image to core file.
595 * Used to unmap regions which cannot be dumped; e.g. a region mapping
596 * hardware registers which are write-only or must be accessed as bytes.
597 */
598mmcore(p)
599 struct proc *p;
600{
601 register struct mapmem *mp, **mpp;
602 int error = 0, error1, changed = 0;
603
604 mpp = &u.u_mmap;
605 for (mp = *mpp; mp; mp = *mpp) {
606 if ((mp->mm_prot & MM_NOCORE) == 0) {
607 mpp = &mp->mm_next;
608 continue;
609 }
610 if (mp->mm_ops->mm_exit)
611 error = (*mp->mm_ops->mm_exit)(mp);
612 if (*mpp == mp) {
613 *mpp = mp->mm_next;
614 MMFREE(mp);
615 }
616 changed++;
617 }
618 if (changed && (error1 = mmexpand(p)))
619 return (error1);
620 return (error);
621}
622
623/*
624 * Duplicate mapped memory regions in a forked process.
625 * XXX child may wind up short a few regions if not enough resources.
626 */
627mmdup(pu, cu)
628 struct user *pu, *cu;
629{
630 register struct mapmem *pmp, *cmp;
631 register struct pte *ppte, *cpte;
632 register segsz_t count;
633
634 /*
635 * First duplicate the mmap chain
636 */
637 MMALLOC(cu->u_mmap);
638 pmp = pu->u_mmap;
639 cmp = cu->u_mmap;
640 while (pmp && cmp) {
641 *cmp = *pmp;
642 if (pmp->mm_next)
643 MMALLOC(cmp->mm_next);
644 pmp = pmp->mm_next;
645 cmp = cmp->mm_next;
646 }
647 /*
648 * Now duplicate user address space that vmdup() won't do
649 * i.e. mapped regions outside of data segment.
650 */
651 ppte = dptopte(pu->u_procp, pu->u_procp->p_dsize);
652 cpte = dptopte(cu->u_procp, cu->u_procp->p_dsize);
653 for (count = pu->u_procp->p_mmsize; count; count--) {
654 if (ppte->pg_fod && ppte->pg_v)
655 *(int *)cpte = *(int *)ppte;
656 ppte++, cpte++;
657 }
658 cu->u_procp->p_flag |= SPTECHG;
659}
660
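/*
 * Allocate and link a mapmem descriptor for a region of the given size
 * (in bytes, click aligned) at *uvap with protection prot, identified by
 * id (the file descriptor) and managed through ops.  A zero *uvap asks
 * mmalloc to choose an address beyond the data segment; the chosen
 * address is returned in *uvap and the descriptor in *mpp.  The user
 * page table is grown as needed via mmexpand().
 */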
661mmalloc(p, id, uvap, size, prot, ops, mpp)
662	struct proc *p;
	int id, prot;
663 caddr_t *uvap;
664 segsz_t size;
665 struct mapmemops *ops;
666 struct mapmem **mpp;
667{
668 register struct mapmem *mp;
669 register u_int uva;
670 int error;
671
672 /*
673 * Validate size first
674 */
675 if (size <= 0 || (size & CLOFSET))
676 return(EINVAL);
677 /*
678 * A uva of zero means to map at our discretion.
679 * Our strategy is to place the segment at the max of:
680 * - the current data + mapped memory size
681 * - the default data size limit
682 * (if it will fit within the MAXDSIZ limit)
683 * If this is the first mapped memory region beyond the data
684 * segment we round to a MMSEG boundary to allow for data
685 * segment growth.
686 */
687 uva = (u_int) *uvap;
688 if (uva == 0) {
689 register u_int uva2;
690
691 uva = ctob(dptov(p, u.u_dsize + p->p_mmsize));
692 uva2 = ctob(dptov(p, btoc(DFLDSIZ)));
693 uva2 = ((uva2 + (MMSEG-1)) & ~(MMSEG-1));
694 if (uva < uva2 &&
695 uva2 + size < ctob(dptov(p, btoc(MAXDSIZ))))
696 uva = uva2;
697 else if (p->p_mmsize == 0)
698 uva = ((uva + (MMSEG-1)) & ~(MMSEG-1));
699 }
700 /*
701 * Impose necessary constraints on address.
702 */
703 if ((uva & CLOFSET) || uva < ctob(dptov(p, 0)) ||
704 uva+size >= ctob(sptov(p, u.u_ssize)))
705 return (EINVAL);
706 if (mmclash(u.u_mmap, (caddr_t)uva, (caddr_t)uva+size))
707 return (EINVAL);
708 /*
709 * Finally, allocate and initialize descriptor and expand
710 * user address space as necessary.
711 */
712 MMALLOC(mp);
713 if (mp == MMNIL)
714 return (ENOMEM);
715 mp->mm_next = u.u_mmap;
716 mp->mm_id = id;
717 mp->mm_uva = (caddr_t) uva;
718 mp->mm_size = size;
719 mp->mm_prot = prot;
720 mp->mm_ops = ops;
721 u.u_mmap = mp;
722 if (error = mmexpand(p)) {
723 u.u_mmap = mp->mm_next;
724 MMFREE(mp);
725 return(error);
726 }
727 *uvap = (caddr_t) uva;
728 *mpp = mp;
729 return(0);
730}
731
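/*
 * Unlink a region descriptor from the chain and release it, then let
 * mmexpand() shrink the mapped-memory page table accordingly.
 * Panics if the descriptor is not on the chain.
 */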
732mmfree(p, mp)
733 struct proc *p;
734 register struct mapmem *mp;
735{
736 register struct mapmem *cmp, **mpp;
737
738 /*
739 * Remove region from chain
740 */
741 mpp = &u.u_mmap;
742 for (cmp = *mpp; cmp; cmp = *mpp) {
743 if (cmp == mp)
744 break;
745 mpp = &cmp->mm_next;
746 }
747 if (cmp == MMNIL)
748 panic("mmfree");
749 *mpp = mp->mm_next;
750 MMFREE(mp);
751 return (mmexpand(p));
752}
753
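/*
 * Map a region in: verify that the object's mapfunc can translate every
 * page and that the region lies within the data plus mapped-memory area
 * of the address space, then build fill-on-demand ptes whose page-frame
 * numbers come from mapfunc, releasing any data pages previously
 * resident in the range.
 */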
754mmmapin(p, mp, mapfunc)
755 register struct proc *p;
756 register struct mapmem *mp;
757 int (*mapfunc)();
758{
759 register struct pte *pte;
760 register int off;
761 struct pte *dpte;
762 int pm, fv, lv;
763
764 /*
765 * Verify that range can be mapped
766 */
767 for (off = 0; off < mp->mm_size; off += NBPG)
768 if ((*mapfunc)(mp, off) == -1)
769 return (EINVAL);
770 /*
771 * Now verify that region is in range
772 */
773 fv = btop(mp->mm_uva);
774 lv = btop(mp->mm_uva + mp->mm_size - 1);
775 if (fv < dptov(p, 0) ||
776 lv >= dptov(p, u.u_dsize + p->p_mmsize))
777 return (ENOMEM);
778 /*
779 * Finally, do the mapping.
780 */
781 if (mp->mm_prot & MM_RO)
782 pm = PG_URKR|PG_FOD|PG_V;
783 else
784 pm = PG_UW|PG_FOD|PG_V;
785#if defined(hp300)
786 if (mp->mm_prot & MM_CI)
787 pm |= PG_CI;
788#endif
789 pte = vtopte(p, fv);
790 dpte = dptopte(p, u.u_dsize);
791 for (off = 0; off < mp->mm_size; off += NBPG) {
792 if ((off&CLOFSET) == 0 && pte < dpte)
793 p->p_rssize -= vmemfree(pte, CLSIZE);
794 *(int *)pte = pm;
795 pte->pg_pfnum = (*mapfunc)(mp, off);
796 pte++;
797 }
798 newptes(vtopte(p, fv), (u_int)fv, (int)btoc(mp->mm_size));
799 return (0);
800}
801
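/*
 * Map a region out: release any resident pages and reset the ptes.
 * Ptes below the current end of the data segment revert to demand
 * zero-fill so ordinary data pages can be faulted in there again;
 * ptes beyond it are simply cleared.
 */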
802mmmapout(p, mp)
803 register struct proc *p;
804 register struct mapmem *mp;
805{
806 register struct pte *pte;
807 register int off;
808 struct pte *dpte;
809 int fv, lv;
810
811 fv = btop(mp->mm_uva);
812 lv = btop(mp->mm_uva + mp->mm_size - 1);
813 if (fv < dptov(p, 0) ||
814 lv >= dptov(p, u.u_dsize + p->p_mmsize))
815 panic("mmmapout");
816 pte = vtopte(p, fv);
817 dpte = dptopte(p, u.u_dsize);
818 for (off = 0; off < mp->mm_size; off += NBPG) {
819 if (pte < dpte) {
820 if ((off & CLOFSET) == 0)
821 p->p_rssize -= vmemfree(pte, CLSIZE);
822 *(int *)pte = (PG_UW|PG_FOD);
823 ((struct fpte *)pte)->pg_fileno = PG_FZERO;
824 } else
825 *(int *)pte = 0;
826 pte++;
827 }
828 newptes(vtopte(p, fv), (u_int)fv, (int)btoc(mp->mm_size));
829}
830
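/*
 * Recompute p_mmsize from the highest mapped region beyond the data
 * segment and grow or shrink the user page table (machine dependent)
 * to match.  Returns ENOMEM if data plus mapped memory would exceed
 * the hard data size limit.
 */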
831mmexpand(p)
832 struct proc *p;
833{
834 register int szpt, change;
835 caddr_t high;
836 segsz_t nsize, oms;
837
838 oms = p->p_mmsize;
839 /*
840 * Get new mmsize based on existing regions and use
841 * that to calculate change in page table size.
842 */
843 if (u.u_mmap) {
844 mmrange(u.u_mmap, (caddr_t *)0, &high);
845 nsize = btop(high) - dptov(p, u.u_dsize) + 1;
846 if (nsize < 0)
847 nsize = 0;
848 } else
849 nsize = 0;
850 change = nsize - oms;
851 if (change == 0)
852 return(0);
853
854 /*
855 * Ensure data + mapped memory fits within maximum data limit.
856 * This is possibly a little restrictive, but it helps keep
857 * page table sizes down.
858 */
859 if (change > 0 &&
860 (ctob(oms+change) > u.u_rlimit[RLIMIT_DATA].rlim_max ||
861 ctob(u.u_dsize+oms+change) > u.u_rlimit[RLIMIT_DATA].rlim_max))
862 return(ENOMEM);
863 /*
864 * Expand page table if necessary.
865 * Note that ptexpand takes care of flushing the translation buffer.
866 */
867 p->p_mmsize += change;
868#if defined(hp300) || defined(i386)
869 szpt = ptsize(p) - u.u_pcb.pcb_szpt;
870 if (szpt > 0)
871 ptexpand(szpt, u.u_dsize, oms, u.u_ssize);
872 setp0lr(u.u_pcb.pcb_p0lr + change);
873#endif
874#if defined(vax) || defined(tahoe)
875#if defined(vax)
876 szpt = (u.u_pcb.pcb_p1br + (u.u_pcb.pcb_p1lr&~PME_CLR)) -
877 (u.u_pcb.pcb_p0br + (u.u_pcb.pcb_p0lr&~AST_CLR));
878#else
879 szpt = (u.u_pcb.pcb_p2br + u.u_pcb.pcb_p2lr) -
880 (u.u_pcb.pcb_p0br + u.u_pcb.pcb_p0lr);
881#endif
882 if (change > szpt)
883 ptexpand(clrnd(ctopt(change - szpt)), u.u_dsize, oms, u.u_ssize);
884 /*
885 * Clear new ptes.
886 * We need to do this because there may be bogus (yet technically
887 * valid) ptes above the old p0lr value. This can happen if the
888 * data segment has shrunk in the past leaving such ptes behind.
889 * There is no need to invalidate such ptes at that time since the
890 * length register will prevent their use. We are safe on the HPs
891 * because we do invalidate old ptes in setp0lr() when shrinking.
892 */
893 if (change > 0) {
894 struct pte *bpte;
895
896#if defined(vax)
897 bpte = u.u_pcb.pcb_p0br + (u.u_pcb.pcb_p0lr&~AST_CLR);
898#else
899 bpte = u.u_pcb.pcb_p0br + u.u_pcb.pcb_p0lr;
900#endif
901 bzero((caddr_t)bpte, change * sizeof(struct pte));
902 mtpr(TBIA, 0);
903 }
904 /* avoid side-effects of setp0lr */
905#if defined(vax)
906 change += u.u_pcb.pcb_p0lr &~ AST_CLR;
907#else
908 change += u.u_pcb.pcb_p0lr;
909#endif
910 setp0lr(change);
911#endif
912 return(0);
913}
914
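/*
 * Return the lowest and highest user virtual addresses spanned by a
 * chain of mapped regions.
 */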
915mmrange(mp, lap, hap)
916 register struct mapmem *mp;
917 caddr_t *lap, *hap;
918{
919 register caddr_t low, high, top;
920
921 low = high = 0;
922 while (mp) {
923 if (low == 0 || mp->mm_uva < low)
924 low = mp->mm_uva;
925 top = mp->mm_uva + mp->mm_size - 1;
926 if (high == 0 || top > high)
927 high = top;
928 mp = mp->mm_next;
929 }
930 if (lap)
931 *lap = low;
932 if (hap)
933 *hap = high;
934}
935
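/*
 * Return 1 if the half-open range [la, ha) overlaps any region on the
 * chain, 0 otherwise.
 */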
936mmclash(mp, la, ha)
937 register struct mapmem *mp;
938 caddr_t la, ha;
939{
940 while (mp) {
941 if (ha > mp->mm_uva && la < mp->mm_uva + mp->mm_size)
942 return(1);
943 mp = mp->mm_next;
944 }
945 return(0);
946}
947
948#endif /* MAPMEM */