BSD 4.3 Net/2 release
[unix-history] usr/src/sys/i386/i386/pmap.c
 1/*
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
 6 * the Systems Programming Group of the University of Utah Computer
 7 * Science Department and William Jolitz of UUNET Technologies Inc.
 8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by the University of
20 * California, Berkeley and its contributors.
21 * 4. Neither the name of the University nor the names of its contributors
22 * may be used to endorse or promote products derived from this software
23 * without specific prior written permission.
 24 *
25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
 36 *
 37 * @(#)pmap.c 7.7 (Berkeley) 5/12/91
38 */
39
40/*
41 * Derived from hp300 version by Mike Hibler, this version by William
42 * Jolitz uses a recursive map [a pde points to the page directory] to
43 * map the page tables using the pagetables themselves. This is done to
44 * reduce the impact on kernel virtual memory for lots of sparse address
45 * space, and to reduce the cost of memory to each process.
46 *
47 * Derived from: hp300/@(#)pmap.c 7.1 (Berkeley) 12/5/90
48 */
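/*
 * Editor's sketch (not part of the original source): because one page
 * directory entry points back at the page directory itself, the page
 * tables appear as a single linear array of PTEs in kernel virtual
 * space, so finding the PTE for a virtual address is pure arithmetic.
 * Assuming the PTmap window and PG_SHIFT from the machine pmap.h, a
 * vtopte()-style lookup is roughly:
 *
 *	#define SKETCH_VTOPTE(va)	(&PTmap[(vm_offset_t)(va) >> PG_SHIFT])
 *
 * i.e. the top 20 bits of the VA index the recursively mapped PTE
 * array; the pmap_pde() macro later in this file does the same thing
 * one level up, using the top 10 bits to index the page directory.
 */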
49
50/*
51 * Reno i386 version, from Mike Hibler's hp300 version.
52 */
53
54/*
55 * Manages physical address maps.
56 *
57 * In addition to hardware address maps, this
58 * module is called upon to provide software-use-only
59 * maps which may or may not be stored in the same
60 * form as hardware maps. These pseudo-maps are
61 * used to store intermediate results from copy
62 * operations to and from address spaces.
63 *
64 * Since the information managed by this module is
65 * also stored by the logical address mapping module,
66 * this module may throw away valid virtual-to-physical
67 * mappings at almost any time. However, invalidations
68 * of virtual-to-physical mappings must be done as
69 * requested.
70 *
71 * In order to cope with hardware architectures which
72 * make virtual-to-physical map invalidates expensive,
73 * this module may delay invalidate or reduced protection
74 * operations until such time as they are actually
75 * necessary. This module is given full information as
76 * to which processors are currently using which maps,
77 * and to when physical maps must be made correct.
78 */
79
80#include "param.h"
 81#include "proc.h"
 82#include "malloc.h"
 83#include "user.h"
 84
85#include "vm/vm.h"
86#include "vm/vm_kern.h"
87#include "vm/vm_page.h"
88/*#include "vm/vm_pageout.h"*/
 89
 90/*#include "machine/isa.h"*/
91
92/*
93 * Allocate various and sundry SYSMAPs used in the days of old VM
94 * and not yet converted. XXX.
95 */
96#define BSDVM_COMPAT 1
97
98#ifdef DEBUG
99struct {
100 int kernel; /* entering kernel mapping */
101 int user; /* entering user mapping */
102 int ptpneeded; /* needed to allocate a PT page */
103 int pwchange; /* no mapping change, just wiring or protection */
104 int wchange; /* no mapping change, just wiring */
105 int mchange; /* was mapped but mapping to different page */
106 int managed; /* a managed page */
107 int firstpv; /* first mapping for this PA */
108 int secondpv; /* second mapping for this PA */
109 int ci; /* cache inhibited */
110 int unmanaged; /* not a managed page */
111 int flushes; /* cache flushes */
112} enter_stats;
113struct {
114 int calls;
115 int removes;
116 int pvfirst;
117 int pvsearch;
118 int ptinvalid;
119 int uflushes;
120 int sflushes;
121} remove_stats;
122
123int debugmap = 0;
 124int pmapdebug = 0 /* 0xffff */;
125#define PDB_FOLLOW 0x0001
126#define PDB_INIT 0x0002
127#define PDB_ENTER 0x0004
128#define PDB_REMOVE 0x0008
129#define PDB_CREATE 0x0010
130#define PDB_PTPAGE 0x0020
131#define PDB_CACHE 0x0040
132#define PDB_BITS 0x0080
133#define PDB_COLLECT 0x0100
134#define PDB_PROTECT 0x0200
135#define PDB_PDRTAB 0x0400
136#define PDB_PARANOIA 0x2000
137#define PDB_WIRING 0x4000
138#define PDB_PVDUMP 0x8000
139
140int pmapvacflush = 0;
141#define PVF_ENTER 0x01
142#define PVF_REMOVE 0x02
143#define PVF_PROTECT 0x04
144#define PVF_TOTAL 0x80
145#endif
146
147/*
148 * Get PDEs and PTEs for user/kernel address space
149 */
150#define pmap_pde(m, v) (&((m)->pm_pdir[((vm_offset_t)(v) >> PD_SHIFT)&1023]))
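/*
 * Editor's note (illustrative, not part of the original source):
 * assuming the usual PD_SHIFT of 22 for 4K pages, pmap_pde() simply
 * takes the top ten bits of the VA as a page directory index, e.g.
 * va 0xfe000000 (the kernel link base) selects directory slot 0x3f8,
 * while a user text address such as 0x1000 selects slot 0.
 */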
151
152#define pmap_pte_pa(pte) (*(int *)(pte) & PG_FRAME)
153
154#define pmap_pde_v(pte) ((pte)->pd_v)
155#define pmap_pte_w(pte) ((pte)->pg_w)
156/* #define pmap_pte_ci(pte) ((pte)->pg_ci) */
157#define pmap_pte_m(pte) ((pte)->pg_m)
158#define pmap_pte_u(pte) ((pte)->pg_u)
159#define pmap_pte_v(pte) ((pte)->pg_v)
160#define pmap_pte_set_w(pte, v) ((pte)->pg_w = (v))
161#define pmap_pte_set_prot(pte, v) ((pte)->pg_prot = (v))
162
163/*
164 * Given a map and a machine independent protection code,
 165 * convert to an i386 protection code.
166 */
167#define pte_prot(m, p) (protection_codes[p])
168int protection_codes[8];
169
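/*
 * Editor's note (illustrative, not part of the original source): the
 * table filled in by i386_protection_init() below folds the eight
 * VM_PROT_* combinations onto what a 386 PTE can actually express,
 * since there is no separate execute permission, e.g.
 *
 *	pte_prot(pmap, VM_PROT_NONE)			-> 0
 *	pte_prot(pmap, VM_PROT_READ)			-> PG_RO
 *	pte_prot(pmap, VM_PROT_READ|VM_PROT_EXECUTE)	-> PG_RO
 *	pte_prot(pmap, VM_PROT_READ|VM_PROT_WRITE)	-> PG_RW
 */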
170struct pmap kernel_pmap_store;
171pmap_t kernel_pmap;
172
173vm_offset_t avail_start; /* PA of first available physical page */
174vm_offset_t avail_end; /* PA of last available physical page */
175vm_size_t mem_size; /* memory size in bytes */
176vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss)*/
177vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
178vm_offset_t vm_first_phys; /* PA of first managed page */
179vm_offset_t vm_last_phys; /* PA just past last managed page */
180int i386pagesperpage; /* PAGE_SIZE / I386_PAGE_SIZE */
181boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
182char *pmap_attributes; /* reference and modify bits */
183
184boolean_t pmap_testbit();
 185void pmap_clear_modify();
186
187#if BSDVM_COMPAT
188#include "msgbuf.h"
189
190/*
191 * All those kernel PT submaps that BSD is so fond of
192 */
193struct pte *CMAP1, *CMAP2, *mmap;
194caddr_t CADDR1, CADDR2, vmmap;
195struct pte *msgbufmap;
196struct msgbuf *msgbufp;
197#endif
198
199/*
200 * Bootstrap the system enough to run with virtual memory.
201 * Map the kernel's code and data, and allocate the system page table.
202 *
203 * On the I386 this is called after mapping has already been enabled
204 * and just syncs the pmap module with what has already been done.
205 * [We can't call it easily with mapping off since the kernel is not
206 * mapped with PA == VA, hence we would have to relocate every address
207 * from the linked base (virtual) address 0xFE000000 to the actual
208 * (physical) address starting relative to 0]
209 */
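/*
 * Editor's note (illustrative, not part of the original source): the
 * kernel is linked at virtual 0xFE000000 but loaded at physical 0, so
 * before the page tables are in force a symbol linked at, say,
 * 0xFE012345 actually sits at physical 0x00012345; running this code
 * with the MMU off would mean rebasing every address by that
 * 0xFE000000 offset, which is why bootstrap only syncs state here.
 */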
210struct pte *pmap_pte();
211
212extern vm_offset_t atdevbase;
 213void
214pmap_bootstrap(firstaddr, loadaddr)
215 vm_offset_t firstaddr;
216 vm_offset_t loadaddr;
217{
218#if BSDVM_COMPAT
219 vm_offset_t va;
220 struct pte *pte;
221#endif
222 extern vm_offset_t maxmem, physmem;
223extern int IdlePTD;
224
 225	firstaddr = 0x100000;	/*XXX basemem completely fucked (again) */
 226	avail_start = firstaddr;
227 avail_end = maxmem << PG_SHIFT;
228
229 /* XXX: allow for msgbuf */
230 avail_end -= i386_round_page(sizeof(struct msgbuf));
231
232 mem_size = physmem << PG_SHIFT;
233 virtual_avail = atdevbase + 0x100000 - 0xa0000 + 10*NBPG;
234 virtual_end = VM_MAX_KERNEL_ADDRESS;
235 i386pagesperpage = PAGE_SIZE / I386_PAGE_SIZE;
236
237 /*
238 * Initialize protection array.
239 */
240 i386_protection_init();
241
242 /*
243 * The kernel's pmap is statically allocated so we don't
244 * have to use pmap_create, which is unlikely to work
245 * correctly at this part of the boot sequence.
246 */
247 kernel_pmap = &kernel_pmap_store;
248
249#ifdef notdef
250 /*
251 * Create Kernel page directory table and page maps.
 252 * [ currently done in locore. i have wild and crazy ideas -wfj ]
253 */
254 bzero(firstaddr, 4*NBPG);
255 kernel_pmap->pm_pdir = firstaddr + VM_MIN_KERNEL_ADDRESS;
256 kernel_pmap->pm_ptab = firstaddr + VM_MIN_KERNEL_ADDRESS + NBPG;
257
258 firstaddr += NBPG;
259 for (x = i386_btod(VM_MIN_KERNEL_ADDRESS);
260 x < i386_btod(VM_MIN_KERNEL_ADDRESS)+3; x++) {
261 struct pde *pde;
262 pde = kernel_pmap->pm_pdir + x;
263 *(int *)pde = firstaddr + x*NBPG | PG_V | PG_KW;
264 }
265#else
 266	kernel_pmap->pm_pdir = (pd_entry_t *)(0xfe000000 + IdlePTD);
267#endif
268
269
270 simple_lock_init(&kernel_pmap->pm_lock);
271 kernel_pmap->pm_count = 1;
272
273#if BSDVM_COMPAT
274 /*
275 * Allocate all the submaps we need
276 */
277#define SYSMAP(c, p, v, n) \
278 v = (c)va; va += ((n)*I386_PAGE_SIZE); p = pte; pte += (n);
279
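/*
 * Editor's note (illustrative, not part of the original source): each
 * SYSMAP() use below reserves n pages of kernel VA and remembers both
 * the starting VA and its PTE slot; for example
 *
 *	SYSMAP(caddr_t ,CMAP1 ,CADDR1 ,1 )
 *
 * expands to roughly
 *
 *	CADDR1 = (caddr_t)va; va += 1*I386_PAGE_SIZE;
 *	CMAP1 = pte; pte += 1;
 *
 * so later code can map any physical page at CADDR1 just by filling in
 * the PTE that CMAP1 points to.
 */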
280 va = virtual_avail;
281 pte = pmap_pte(kernel_pmap, va);
282
283 SYSMAP(caddr_t ,CMAP1 ,CADDR1 ,1 )
284 SYSMAP(caddr_t ,CMAP2 ,CADDR2 ,1 )
285 SYSMAP(caddr_t ,mmap ,vmmap ,1 )
286 SYSMAP(struct msgbuf * ,msgbufmap ,msgbufp ,1 )
287 virtual_avail = va;
288#endif
289
 290	/**(int *)PTD = 0;
 291	load_cr3(rcr3());*/
 292
293}
294
295/*
296 * Initialize the pmap module.
297 * Called by vm_init, to initialize any structures that the pmap
298 * system needs to map virtual memory.
299 */
300void
301pmap_init(phys_start, phys_end)
302 vm_offset_t phys_start, phys_end;
303{
304 vm_offset_t addr, addr2;
305 vm_size_t npg, s;
306 int rv;
 307	extern int KPTphys;
308
309#ifdef DEBUG
310 if (pmapdebug & PDB_FOLLOW)
311 printf("pmap_init(%x, %x)\n", phys_start, phys_end);
312#endif
313 /*
314 * Now that kernel map has been allocated, we can mark as
315 * unavailable regions which we have mapped in locore.
316 */
 317	addr = atdevbase;
 318	(void) vm_map_find(kernel_map, NULL, (vm_offset_t) 0,
 319		&addr, (0x100000-0xa0000), FALSE);
 320
321 addr = (vm_offset_t) 0xfe000000+KPTphys/* *NBPG */;
322 vm_object_reference(kernel_object);
323 (void) vm_map_find(kernel_map, kernel_object, addr,
324 &addr, 2*NBPG, FALSE);
325
326 /*
327 * Allocate memory for random pmap data structures. Includes the
328 * pv_head_table and pmap_attributes.
329 */
330 npg = atop(phys_end - phys_start);
331 s = (vm_size_t) (sizeof(struct pv_entry) * npg + npg);
332 s = round_page(s);
333 addr = (vm_offset_t) kmem_alloc(kernel_map, s);
334 pv_table = (pv_entry_t) addr;
335 addr += sizeof(struct pv_entry) * npg;
336 pmap_attributes = (char *) addr;
337#ifdef DEBUG
338 if (pmapdebug & PDB_INIT)
339 printf("pmap_init: %x bytes (%x pgs): tbl %x attr %x\n",
340 s, npg, pv_table, pmap_attributes);
341#endif
342
8a97dd44
WN
343 /*
344 * Now it is safe to enable pv_table recording.
345 */
346 vm_first_phys = phys_start;
347 vm_last_phys = phys_end;
348 pmap_initialized = TRUE;
349}
350
351/*
352 * Used to map a range of physical addresses into kernel
353 * virtual address space.
354 *
355 * For now, VM is already on, we only need to map the
356 * specified memory.
357 */
358vm_offset_t
359pmap_map(virt, start, end, prot)
360 vm_offset_t virt;
361 vm_offset_t start;
362 vm_offset_t end;
363 int prot;
364{
365#ifdef DEBUG
366 if (pmapdebug & PDB_FOLLOW)
367 printf("pmap_map(%x, %x, %x, %x)\n", virt, start, end, prot);
368#endif
369 while (start < end) {
370 pmap_enter(kernel_pmap, virt, start, prot, FALSE);
371 virt += PAGE_SIZE;
372 start += PAGE_SIZE;
373 }
374 return(virt);
375}
376
377/*
378 * Create and return a physical map.
379 *
380 * If the size specified for the map
381 * is zero, the map is an actual physical
382 * map, and may be referenced by the
383 * hardware.
384 *
385 * If the size specified is non-zero,
386 * the map will be used in software only, and
387 * is bounded by that size.
388 *
 389 * [ just allocate a ptd and mark it uninitialized -- should we track
 390 * with a table which process has which ptd? -wfj ]
 391 */
 392
393pmap_t
394pmap_create(size)
395 vm_size_t size;
396{
397 register pmap_t pmap;
398
399#ifdef DEBUG
400 if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
af359dea 401 printf("pmap_create(%x)\n", size);
8a97dd44
WN
402#endif
403 /*
404 * Software use map does not need a pmap
405 */
406 if (size)
af359dea 407 return(NULL);
8a97dd44 408
af359dea 409 /* XXX: is it ok to wait here? */
8a97dd44 410 pmap = (pmap_t) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK);
af359dea
C
411#ifdef notifwewait
412 if (pmap == NULL)
8a97dd44 413 panic("pmap_create: cannot allocate a pmap");
af359dea
C
414#endif
415 bzero(pmap, sizeof(*pmap));
416 pmap_pinit(pmap);
417 return (pmap);
418}
419
420/*
421 * Initialize a preallocated and zeroed pmap structure,
422 * such as one in a vmspace structure.
423 */
424void
425pmap_pinit(pmap)
426 register struct pmap *pmap;
427{
428
429#ifdef DEBUG
430 if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
431 pg("pmap_pinit(%x)\n", pmap);
432#endif
8a97dd44
WN
433
434 /*
435 * No need to allocate page table space yet but we do need a
436 * valid page directory table.
437 */
 438	pmap->pm_pdir = (pd_entry_t *) kmem_alloc(kernel_map, NBPG);
439
440 /* wire in kernel global address entries */
441 bcopy(PTD+KPTDI_FIRST, pmap->pm_pdir+KPTDI_FIRST,
442 (KPTDI_LAST-KPTDI_FIRST+1)*4);
443
444 /* install self-referential address mapping entry */
445 *(int *)(pmap->pm_pdir+PTDPTDI) =
446 (int)pmap_extract(kernel_pmap, pmap->pm_pdir) | PG_V | PG_URKW;
447
448 pmap->pm_count = 1;
449 simple_lock_init(&pmap->pm_lock);
450}
451
452/*
453 * Retire the given physical map from service.
454 * Should only be called if the map contains
455 * no valid mappings.
456 */
457void
458pmap_destroy(pmap)
459 register pmap_t pmap;
460{
461 int count;
462
463#ifdef DEBUG
464 if (pmapdebug & PDB_FOLLOW)
af359dea 465 printf("pmap_destroy(%x)\n", pmap);
8a97dd44 466#endif
af359dea 467 if (pmap == NULL)
8a97dd44
WN
468 return;
469
470 simple_lock(&pmap->pm_lock);
af359dea 471 count = --pmap->pm_count;
8a97dd44 472 simple_unlock(&pmap->pm_lock);
af359dea
C
473 if (count == 0) {
474 pmap_release(pmap);
475 free((caddr_t)pmap, M_VMPMAP);
476 }
477}
8a97dd44 478
af359dea
C
479/*
480 * Release any resources held by the given physical map.
481 * Called when a pmap initialized by pmap_pinit is being released.
482 * Should only be called if the map contains no valid mappings.
483 */
484void
485pmap_release(pmap)
486 register struct pmap *pmap;
487{
488
489#ifdef DEBUG
490 if (pmapdebug & PDB_FOLLOW)
491 pg("pmap_release(%x)\n", pmap);
492#endif
493#ifdef notdef /* DIAGNOSTIC */
494 /* count would be 0 from pmap_destroy... */
495 simple_lock(&pmap->pm_lock);
496 if (pmap->pm_count != 1)
497 panic("pmap_release count");
498#endif
75677547 499 kmem_free(kernel_map, (vm_offset_t)pmap->pm_pdir, NBPG);
8a97dd44
WN
500}
501
502/*
503 * Add a reference to the specified pmap.
504 */
505void
506pmap_reference(pmap)
507 pmap_t pmap;
508{
509#ifdef DEBUG
510 if (pmapdebug & PDB_FOLLOW)
75677547 511 printf("pmap_reference(%x)", pmap);
8a97dd44 512#endif
af359dea 513 if (pmap != NULL) {
8a97dd44
WN
514 simple_lock(&pmap->pm_lock);
515 pmap->pm_count++;
516 simple_unlock(&pmap->pm_lock);
517 }
518}
519
8a97dd44
WN
520/*
521 * Remove the given range of addresses from the specified map.
522 *
523 * It is assumed that the start and end are properly
524 * rounded to the page size.
525 */
526void
527pmap_remove(pmap, sva, eva)
af359dea 528 register struct pmap *pmap;
8a97dd44
WN
529 vm_offset_t sva, eva;
530{
531 register vm_offset_t pa, va;
532 register pt_entry_t *pte;
533 register pv_entry_t pv, npv;
534 register int ix;
535 pmap_t ptpmap;
536 int *pde, s, bits;
537 boolean_t firstpage = TRUE;
538 boolean_t flushcache = FALSE;
539#ifdef DEBUG
540 pt_entry_t opte;
541
542 if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
75677547 543 pg("pmap_remove(%x, %x, %x)", pmap, sva, eva);
8a97dd44
WN
544#endif
545
af359dea 546 if (pmap == NULL)
8a97dd44
WN
547 return;
548
549#ifdef DEBUG
550 remove_stats.calls++;
551#endif
552 for (va = sva; va < eva; va += PAGE_SIZE) {
553 /*
554 * Weed out invalid mappings.
75677547
WN
555 * Note: we assume that the page directory table is
556 * always allocated, and in kernel virtual.
8a97dd44 557 */
75677547 558 if (!pmap_pde_v(pmap_pde(pmap, va)))
8a97dd44 559 continue;
75677547 560
8a97dd44 561 pte = pmap_pte(pmap, va);
75677547
WN
562 if (pte == 0)
563 continue;
8a97dd44
WN
564 pa = pmap_pte_pa(pte);
565 if (pa == 0)
566 continue;
567#ifdef DEBUG
568 opte = *pte;
569 remove_stats.removes++;
570#endif
571 /*
572 * Update statistics
573 */
574 if (pmap_pte_w(pte))
575 pmap->pm_stats.wired_count--;
576 pmap->pm_stats.resident_count--;
577
578 /*
579 * Invalidate the PTEs.
580 * XXX: should cluster them up and invalidate as many
581 * as possible at once.
582 */
583#ifdef DEBUG
584 if (pmapdebug & PDB_REMOVE)
75677547
WN
585 printf("remove: inv %x ptes at %x(%x) ",
586 i386pagesperpage, pte, *(int *)pte);
8a97dd44
WN
587#endif
588 bits = ix = 0;
589 do {
590 bits |= *(int *)pte & (PG_U|PG_M);
591 *(int *)pte++ = 0;
592 /*TBIS(va + ix * I386_PAGE_SIZE);*/
593 } while (++ix != i386pagesperpage);
af359dea
C
594 if (pmap == &curproc->p_vmspace->vm_pmap)
595 pmap_activate(pmap, (struct pcb *)curproc->p_addr);
596 /* are we current address space or kernel? */
597 /*if (pmap->pm_pdir[PTDPTDI].pd_pfnum == PTDpde.pd_pfnum
598 || pmap == kernel_pmap)
599 load_cr3(curpcb->pcb_ptd);*/
600 tlbflush();
8a97dd44 601
75677547
WN
602#ifdef needednotdone
603reduce wiring count on page table pages as references drop
8a97dd44
WN
604#endif
605
606 /*
607 * Remove from the PV table (raise IPL since we
608 * may be called at interrupt time).
609 */
610 if (pa < vm_first_phys || pa >= vm_last_phys)
611 continue;
612 pv = pa_to_pvh(pa);
8a97dd44
WN
613 s = splimp();
614 /*
615 * If it is the first entry on the list, it is actually
616 * in the header and we must copy the following entry up
617 * to the header. Otherwise we must search the list for
618 * the entry. In either case we free the now unused entry.
619 */
620 if (pmap == pv->pv_pmap && va == pv->pv_va) {
8a97dd44
WN
621 npv = pv->pv_next;
622 if (npv) {
623 *pv = *npv;
624 free((caddr_t)npv, M_VMPVENT);
625 } else
af359dea 626 pv->pv_pmap = NULL;
8a97dd44
WN
627#ifdef DEBUG
628 remove_stats.pvfirst++;
629#endif
630 } else {
631 for (npv = pv->pv_next; npv; npv = npv->pv_next) {
632#ifdef DEBUG
633 remove_stats.pvsearch++;
634#endif
635 if (pmap == npv->pv_pmap && va == npv->pv_va)
636 break;
637 pv = npv;
638 }
639#ifdef DEBUG
af359dea 640 if (npv == NULL)
8a97dd44
WN
641 panic("pmap_remove: PA not in pv_tab");
642#endif
8a97dd44
WN
643 pv->pv_next = npv->pv_next;
644 free((caddr_t)npv, M_VMPVENT);
645 pv = pa_to_pvh(pa);
646 }
8a97dd44 647
75677547
WN
648#ifdef notdef
649[tally number of pagetable pages, if sharing of ptpages adjust here]
8a97dd44 650#endif
8a97dd44
WN
651 /*
652 * Update saved attributes for managed page
653 */
654 pmap_attributes[pa_index(pa)] |= bits;
655 splx(s);
656 }
75677547
WN
657#ifdef notdef
658[cache and tlb flushing, if needed]
8a97dd44 659#endif
8a97dd44
WN
660}
661
662/*
663 * Routine: pmap_remove_all
664 * Function:
665 * Removes this physical page from
666 * all physical maps in which it resides.
667 * Reflects back modify bits to the pager.
668 */
669void
670pmap_remove_all(pa)
671 vm_offset_t pa;
672{
673 register pv_entry_t pv;
674 int s;
675
676#ifdef DEBUG
677 if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
75677547
WN
678 printf("pmap_remove_all(%x)", pa);
679 /*pmap_pvdump(pa);*/
8a97dd44
WN
680#endif
681 /*
682 * Not one of ours
683 */
684 if (pa < vm_first_phys || pa >= vm_last_phys)
8a97dd44 685 return;
8a97dd44
WN
686
687 pv = pa_to_pvh(pa);
688 s = splimp();
689 /*
690 * Do it the easy way for now
691 */
af359dea 692 while (pv->pv_pmap != NULL) {
8a97dd44
WN
693#ifdef DEBUG
694 if (!pmap_pde_v(pmap_pde(pv->pv_pmap, pv->pv_va)) ||
695 pmap_pte_pa(pmap_pte(pv->pv_pmap, pv->pv_va)) != pa)
696 panic("pmap_remove_all: bad mapping");
697#endif
698 pmap_remove(pv->pv_pmap, pv->pv_va, pv->pv_va + PAGE_SIZE);
699 }
700 splx(s);
701}
702
703/*
704 * Routine: pmap_copy_on_write
705 * Function:
706 * Remove write privileges from all
707 * physical maps for this physical page.
708 */
709void
710pmap_copy_on_write(pa)
711 vm_offset_t pa;
712{
713#ifdef DEBUG
714 if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
75677547 715 printf("pmap_copy_on_write(%x)", pa);
8a97dd44
WN
716#endif
717 pmap_changebit(pa, PG_RO, TRUE);
718}
719
720/*
721 * Set the physical protection on the
722 * specified range of this map as requested.
723 */
724void
725pmap_protect(pmap, sva, eva, prot)
726 register pmap_t pmap;
727 vm_offset_t sva, eva;
728 vm_prot_t prot;
729{
730 register pt_entry_t *pte;
731 register vm_offset_t va;
732 register int ix;
733 int i386prot;
734 boolean_t firstpage = TRUE;
735
736#ifdef DEBUG
737 if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
75677547 738 printf("pmap_protect(%x, %x, %x, %x)", pmap, sva, eva, prot);
8a97dd44 739#endif
af359dea 740 if (pmap == NULL)
8a97dd44
WN
741 return;
742
8a97dd44
WN
743 if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
744 pmap_remove(pmap, sva, eva);
745 return;
746 }
747 if (prot & VM_PROT_WRITE)
75677547 748 return;
8a97dd44
WN
749
750 pte = pmap_pte(pmap, sva);
751 if(!pte) return;
752 for (va = sva; va < eva; va += PAGE_SIZE) {
753 /*
754 * Page table page is not allocated.
755 * Skip it, we don't want to force allocation
756 * of unnecessary PTE pages just to set the protection.
757 */
758 if (!pmap_pde_v(pmap_pde(pmap, va))) {
759 /* XXX: avoid address wrap around */
760 if (va >= i386_trunc_pdr((vm_offset_t)-1))
761 break;
762 va = i386_round_pdr(va + PAGE_SIZE) - PAGE_SIZE;
763 pte = pmap_pte(pmap, va);
764 pte += i386pagesperpage;
765 continue;
766 }
767 if(!pte) return;
768 /*
769 * Page not valid. Again, skip it.
770 * Should we do this? Or set protection anyway?
771 */
772 if (!pmap_pte_v(pte)) {
773 pte += i386pagesperpage;
774 continue;
775 }
776 ix = 0;
777 i386prot = pte_prot(pmap, prot);
778 if(va < UPT_MAX_ADDRESS)
779 i386prot |= 2 /*PG_u*/;
780 do {
781 /* clear VAC here if PG_RO? */
782 pmap_pte_set_prot(pte++, i386prot);
783 /*TBIS(va + ix * I386_PAGE_SIZE);*/
784 } while (++ix != i386pagesperpage);
785 }
af359dea
C
786 if (pmap == &curproc->p_vmspace->vm_pmap)
787 pmap_activate(pmap, (struct pcb *)curproc->p_addr);
8a97dd44
WN
788}
789
790/*
791 * Insert the given physical page (p) at
792 * the specified virtual address (v) in the
793 * target physical map with the protection requested.
794 *
795 * If specified, the page will be wired down, meaning
796 * that the related pte can not be reclaimed.
797 *
798 * NB: This is the only routine which MAY NOT lazy-evaluate
799 * or lose information. That is, this routine must actually
800 * insert this page into the given map NOW.
801 */
802void
803pmap_enter(pmap, va, pa, prot, wired)
804 register pmap_t pmap;
805 vm_offset_t va;
806 register vm_offset_t pa;
807 vm_prot_t prot;
808 boolean_t wired;
809{
810 register pt_entry_t *pte;
811 register int npte, ix;
812 vm_offset_t opa;
813 boolean_t cacheable = TRUE;
814 boolean_t checkpv = TRUE;
815
816#ifdef DEBUG
817 if (pmapdebug & (PDB_FOLLOW|PDB_ENTER))
75677547 818 printf("pmap_enter(%x, %x, %x, %x, %x)",
8a97dd44
WN
819 pmap, va, pa, prot, wired);
820#endif
af359dea 821 if (pmap == NULL)
8a97dd44
WN
822 return;
823
824 if(va > VM_MAX_KERNEL_ADDRESS)panic("pmap_enter: toobig");
825 /* also, should not muck with PTD va! */
826
827#ifdef DEBUG
828 if (pmap == kernel_pmap)
829 enter_stats.kernel++;
830 else
831 enter_stats.user++;
832#endif
8a97dd44
WN
833
834 /*
835 * Page Directory table entry not valid, we need a new PT page
836 */
75677547
WN
837 if (!pmap_pde_v(pmap_pde(pmap, va))) {
838 pg("ptdi %x", pmap->pm_pdir[PTDPTDI]);
839 }
8a97dd44
WN
840
841 pte = pmap_pte(pmap, va);
842 opa = pmap_pte_pa(pte);
843#ifdef DEBUG
844 if (pmapdebug & PDB_ENTER)
75677547 845 printf("enter: pte %x, *pte %x ", pte, *(int *)pte);
8a97dd44
WN
846#endif
847
848 /*
849 * Mapping has not changed, must be protection or wiring change.
850 */
851 if (opa == pa) {
852#ifdef DEBUG
853 enter_stats.pwchange++;
854#endif
855 /*
856 * Wiring change, just update stats.
857 * We don't worry about wiring PT pages as they remain
858 * resident as long as there are valid mappings in them.
859 * Hence, if a user page is wired, the PT page will be also.
860 */
861 if (wired && !pmap_pte_w(pte) || !wired && pmap_pte_w(pte)) {
862#ifdef DEBUG
863 if (pmapdebug & PDB_ENTER)
75677547 864 pg("enter: wiring change -> %x ", wired);
8a97dd44
WN
865#endif
866 if (wired)
867 pmap->pm_stats.wired_count++;
868 else
869 pmap->pm_stats.wired_count--;
870#ifdef DEBUG
871 enter_stats.wchange++;
872#endif
873 }
8a97dd44
WN
874 goto validate;
875 }
876
877 /*
878 * Mapping has changed, invalidate old range and fall through to
879 * handle validating new mapping.
880 */
881 if (opa) {
882#ifdef DEBUG
883 if (pmapdebug & PDB_ENTER)
75677547 884 printf("enter: removing old mapping %x pa %x ", va, opa);
8a97dd44
WN
885#endif
886 pmap_remove(pmap, va, va + PAGE_SIZE);
887#ifdef DEBUG
888 enter_stats.mchange++;
889#endif
890 }
891
8a97dd44
WN
892 /*
893 * Enter on the PV list if part of our managed memory
894 * Note that we raise IPL while manipulating pv_table
895 * since pmap_enter can be called at interrupt time.
896 */
897 if (pa >= vm_first_phys && pa < vm_last_phys) {
898 register pv_entry_t pv, npv;
899 int s;
900
901#ifdef DEBUG
902 enter_stats.managed++;
903#endif
904 pv = pa_to_pvh(pa);
905 s = splimp();
906#ifdef DEBUG
907 if (pmapdebug & PDB_ENTER)
75677547 908 printf("enter: pv at %x: %x/%x/%x ",
8a97dd44
WN
909 pv, pv->pv_va, pv->pv_pmap, pv->pv_next);
910#endif
911 /*
912 * No entries yet, use header as the first entry
913 */
af359dea 914 if (pv->pv_pmap == NULL) {
8a97dd44
WN
915#ifdef DEBUG
916 enter_stats.firstpv++;
917#endif
918 pv->pv_va = va;
919 pv->pv_pmap = pmap;
af359dea 920 pv->pv_next = NULL;
8a97dd44
WN
921 pv->pv_flags = 0;
922 }
923 /*
924 * There is at least one other VA mapping this page.
925 * Place this entry after the header.
926 */
927 else {
75677547 928 /*printf("second time: ");*/
8a97dd44
WN
929#ifdef DEBUG
930 for (npv = pv; npv; npv = npv->pv_next)
931 if (pmap == npv->pv_pmap && va == npv->pv_va)
932 panic("pmap_enter: already in pv_tab");
933#endif
934 npv = (pv_entry_t)
935 malloc(sizeof *npv, M_VMPVENT, M_NOWAIT);
936 npv->pv_va = va;
937 npv->pv_pmap = pmap;
938 npv->pv_next = pv->pv_next;
8a97dd44
WN
939 pv->pv_next = npv;
940#ifdef DEBUG
941 if (!npv->pv_next)
942 enter_stats.secondpv++;
943#endif
 944		}
 945		splx(s);
 946	}
947 /*
948 * Assumption: if it is not part of our managed memory
 949 * then it must be device memory which may be volatile.
950 */
951 if (pmap_initialized) {
952 checkpv = cacheable = FALSE;
953#ifdef DEBUG
954 enter_stats.unmanaged++;
955#endif
956 }
957
958 /*
959 * Increment counters
960 */
961 pmap->pm_stats.resident_count++;
962 if (wired)
963 pmap->pm_stats.wired_count++;
964
965validate:
966 /*
967 * Now validate mapping with desired protection/wiring.
968 * Assume uniform modified and referenced status for all
969 * I386 pages in a MACH page.
970 */
971 npte = (pa & PG_FRAME) | pte_prot(pmap, prot) | PG_V;
972 npte |= (*(int *)pte & (PG_M|PG_U));
973 if (wired)
974 npte |= PG_W;
975 if(va < UPT_MIN_ADDRESS)
976 npte |= PG_u;
977 else if(va < UPT_MAX_ADDRESS)
978 npte |= PG_u | PG_RW;
8a97dd44
WN
979#ifdef DEBUG
980 if (pmapdebug & PDB_ENTER)
75677547 981 printf("enter: new pte value %x ", npte);
8a97dd44
WN
982#endif
983 ix = 0;
984 do {
985 *(int *)pte++ = npte;
986 /*TBIS(va);*/
987 npte += I386_PAGE_SIZE;
988 va += I386_PAGE_SIZE;
989 } while (++ix != i386pagesperpage);
990 pte--;
991#ifdef DEBUGx
75677547 992cache, tlb flushes
8a97dd44 993#endif
75677547 994/*pads(pmap);*/
af359dea
C
995 /*load_cr3(((struct pcb *)curproc->p_addr)->pcb_ptd);*/
996 tlbflush();
997}
998
999/*
1000 * pmap_page_protect:
1001 *
1002 * Lower the permission for all mappings to a given page.
1003 */
1004void
1005pmap_page_protect(phys, prot)
1006 vm_offset_t phys;
1007 vm_prot_t prot;
1008{
1009 switch (prot) {
1010 case VM_PROT_READ:
1011 case VM_PROT_READ|VM_PROT_EXECUTE:
1012 pmap_copy_on_write(phys);
1013 break;
1014 case VM_PROT_ALL:
1015 break;
1016 default:
1017 pmap_remove_all(phys);
1018 break;
1019 }
8a97dd44
WN
1020}
1021
1022/*
1023 * Routine: pmap_change_wiring
1024 * Function: Change the wiring attribute for a map/virtual-address
1025 * pair.
1026 * In/out conditions:
1027 * The mapping must already exist in the pmap.
1028 */
1029void
1030pmap_change_wiring(pmap, va, wired)
1031 register pmap_t pmap;
1032 vm_offset_t va;
1033 boolean_t wired;
1034{
1035 register pt_entry_t *pte;
1036 register int ix;
1037
1038#ifdef DEBUG
1039 if (pmapdebug & PDB_FOLLOW)
75677547 1040 printf("pmap_change_wiring(%x, %x, %x)", pmap, va, wired);
8a97dd44 1041#endif
af359dea 1042 if (pmap == NULL)
8a97dd44
WN
1043 return;
1044
1045 pte = pmap_pte(pmap, va);
1046#ifdef DEBUG
1047 /*
1048 * Page table page is not allocated.
1049 * Should this ever happen? Ignore it for now,
1050 * we don't want to force allocation of unnecessary PTE pages.
1051 */
1052 if (!pmap_pde_v(pmap_pde(pmap, va))) {
1053 if (pmapdebug & PDB_PARANOIA)
75677547 1054 pg("pmap_change_wiring: invalid PDE for %x ", va);
8a97dd44
WN
1055 return;
1056 }
1057 /*
1058 * Page not valid. Should this ever happen?
1059 * Just continue and change wiring anyway.
1060 */
1061 if (!pmap_pte_v(pte)) {
1062 if (pmapdebug & PDB_PARANOIA)
75677547 1063 pg("pmap_change_wiring: invalid PTE for %x ", va);
8a97dd44
WN
1064 }
1065#endif
1066 if (wired && !pmap_pte_w(pte) || !wired && pmap_pte_w(pte)) {
1067 if (wired)
1068 pmap->pm_stats.wired_count++;
1069 else
1070 pmap->pm_stats.wired_count--;
1071 }
1072 /*
1073 * Wiring is not a hardware characteristic so there is no need
1074 * to invalidate TLB.
1075 */
1076 ix = 0;
1077 do {
1078 pmap_pte_set_w(pte++, wired);
1079 } while (++ix != i386pagesperpage);
1080}
1081
1082/*
1083 * Routine: pmap_pte
1084 * Function:
1085 * Extract the page table entry associated
1086 * with the given map/virtual_address pair.
 1087 * [ what about induced faults -wfj]
1088 */
1089
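/*
 * Editor's note (illustrative, not part of the original source): for
 * the current address space (or the kernel pmap) the self-referential
 * directory slot installed by pmap_pinit() makes vtopte(va) a pure
 * address computation.  For any other pmap, the code below points the
 * alternate recursive slot (APTDpde) at that pmap's page directory,
 * flushes the TLB, and then uses avtopte(va), the same computation in
 * the alternate (APTmap) window.
 */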
1090struct pte *pmap_pte(pmap, va)
1091 register pmap_t pmap;
1092 vm_offset_t va;
1093{
1094
1095#ifdef DEBUGx
1096 if (pmapdebug & PDB_FOLLOW)
1097 printf("pmap_pte(%x, %x) ->\n", pmap, va);
1098#endif
1099 if (pmap && pmap_pde_v(pmap_pde(pmap, va))) {
1100
1101 /* are we current address space or kernel? */
 1102		if (pmap->pm_pdir[PTDPTDI].pd_pfnum == PTDpde.pd_pfnum
1103 || pmap == kernel_pmap)
1104 return ((struct pte *) vtopte(va));
1105
1106 /* otherwise, we are alternate address space */
1107 else {
1108 if (pmap->pm_pdir[PTDPTDI].pd_pfnum
1109 != APTDpde.pd_pfnum) {
 1110			APTDpde = pmap->pm_pdir[PTDPTDI];
 1111			tlbflush();
1112 }
1113 return((struct pte *) avtopte(va));
1114 }
1115 }
1116 return(0);
1117}
1118
1119/*
1120 * Routine: pmap_extract
1121 * Function:
1122 * Extract the physical page address associated
1123 * with the given map/virtual_address pair.
1124 */
1125
1126vm_offset_t
1127pmap_extract(pmap, va)
1128 register pmap_t pmap;
1129 vm_offset_t va;
1130{
1131 register vm_offset_t pa;
1132
1133#ifdef DEBUGx
1134 if (pmapdebug & PDB_FOLLOW)
1135 pg("pmap_extract(%x, %x) -> ", pmap, va);
1136#endif
1137 pa = 0;
1138 if (pmap && pmap_pde_v(pmap_pde(pmap, va))) {
8a97dd44 1139 pa = *(int *) pmap_pte(pmap, va);
8a97dd44
WN
1140 }
1141 if (pa)
1142 pa = (pa & PG_FRAME) | (va & ~PG_FRAME);
1143#ifdef DEBUGx
1144 if (pmapdebug & PDB_FOLLOW)
1145 printf("%x\n", pa);
1146#endif
1147 return(pa);
1148}
1149
1150/*
1151 * Copy the range specified by src_addr/len
1152 * from the source map to the range dst_addr/len
1153 * in the destination map.
1154 *
1155 * This routine is only advisory and need not do anything.
1156 */
1157void pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
1158 pmap_t dst_pmap;
1159 pmap_t src_pmap;
1160 vm_offset_t dst_addr;
1161 vm_size_t len;
1162 vm_offset_t src_addr;
1163{
1164#ifdef DEBUG
1165 if (pmapdebug & PDB_FOLLOW)
75677547 1166 printf("pmap_copy(%x, %x, %x, %x, %x)",
8a97dd44
WN
1167 dst_pmap, src_pmap, dst_addr, len, src_addr);
1168#endif
1169}
1170
1171/*
1172 * Require that all active physical maps contain no
1173 * incorrect entries NOW. [This update includes
1174 * forcing updates of any address map caching.]
1175 *
1176 * Generally used to insure that a thread about
1177 * to run will see a semantically correct world.
1178 */
1179void pmap_update()
1180{
1181#ifdef DEBUG
1182 if (pmapdebug & PDB_FOLLOW)
75677547 1183 printf("pmap_update()");
8a97dd44 1184#endif
af359dea 1185 tlbflush();
8a97dd44
WN
1186}
1187
1188/*
1189 * Routine: pmap_collect
1190 * Function:
1191 * Garbage collects the physical map system for
1192 * pages which are no longer used.
1193 * Success need not be guaranteed -- that is, there
1194 * may well be pages which are not referenced, but
1195 * others may be collected.
1196 * Usage:
1197 * Called by the pageout daemon when pages are scarce.
75677547 1198 * [ needs to be written -wfj ]
8a97dd44
WN
1199 */
1200void
1201pmap_collect(pmap)
1202 pmap_t pmap;
1203{
1204 register vm_offset_t pa;
1205 register pv_entry_t pv;
1206 register int *pte;
1207 vm_offset_t kpa;
1208 int s;
1209
1210#ifdef DEBUG
1211 int *pde;
1212 int opmapdebug;
75677547 1213 printf("pmap_collect(%x) ", pmap);
af359dea 1214#endif
8a97dd44
WN
1215 if (pmap != kernel_pmap)
1216 return;
1217
8a97dd44
WN
1218}
1219
af359dea 1220/* [ macro again?, should I force kstack into user map here? -wfj ] */
8a97dd44
WN
1221void
1222pmap_activate(pmap, pcbp)
1223 register pmap_t pmap;
1224 struct pcb *pcbp;
1225{
1226int x;
1227#ifdef DEBUG
1228 if (pmapdebug & (PDB_FOLLOW|PDB_PDRTAB))
75677547 1229 pg("pmap_activate(%x, %x) ", pmap, pcbp);
8a97dd44
WN
1230#endif
1231 PMAP_ACTIVATE(pmap, pcbp);
1232/*printf("pde ");
1233for(x=0x3f6; x < 0x3fA; x++)
1234 printf("%x ", pmap->pm_pdir[x]);*/
75677547
WN
1235/*pads(pmap);*/
1236/*pg(" pcb_cr3 %x", pcbp->pcb_cr3);*/
8a97dd44
WN
1237}
1238
1239/*
1240 * Routine: pmap_kernel
1241 * Function:
1242 * Returns the physical map handle for the kernel.
1243 */
1244pmap_t
1245pmap_kernel()
1246{
1247 return (kernel_pmap);
1248}
1249
1250/*
1251 * pmap_zero_page zeros the specified (machine independent)
1252 * page by mapping the page into virtual memory and using
1253 * bzero to clear its contents, one machine dependent page
1254 * at a time.
1255 */
1256pmap_zero_page(phys)
1257 register vm_offset_t phys;
1258{
1259 register int ix;
1260
1261#ifdef DEBUG
1262 if (pmapdebug & PDB_FOLLOW)
75677547 1263 printf("pmap_zero_page(%x)", phys);
8a97dd44
WN
1264#endif
1265 phys >>= PG_SHIFT;
1266 ix = 0;
1267 do {
1268 clearseg(phys++);
1269 } while (++ix != i386pagesperpage);
1270}
1271
1272/*
1273 * pmap_copy_page copies the specified (machine independent)
1274 * page by mapping the page into virtual memory and using
1275 * bcopy to copy the page, one machine dependent page at a
1276 * time.
1277 */
1278pmap_copy_page(src, dst)
1279 register vm_offset_t src, dst;
1280{
1281 register int ix;
1282
1283#ifdef DEBUG
1284 if (pmapdebug & PDB_FOLLOW)
75677547 1285 printf("pmap_copy_page(%x, %x)", src, dst);
8a97dd44
WN
1286#endif
1287 src >>= PG_SHIFT;
1288 dst >>= PG_SHIFT;
1289 ix = 0;
1290 do {
1291 physcopyseg(src++, dst++);
1292 } while (++ix != i386pagesperpage);
1293}
1294
1295
1296/*
1297 * Routine: pmap_pageable
1298 * Function:
1299 * Make the specified pages (by pmap, offset)
1300 * pageable (or not) as requested.
1301 *
1302 * A page which is not pageable may not take
1303 * a fault; therefore, its page table entry
1304 * must remain valid for the duration.
1305 *
1306 * This routine is merely advisory; pmap_enter
1307 * will specify that these pages are to be wired
1308 * down (or not) as appropriate.
1309 */
1310pmap_pageable(pmap, sva, eva, pageable)
1311 pmap_t pmap;
1312 vm_offset_t sva, eva;
1313 boolean_t pageable;
1314{
1315#ifdef DEBUG
1316 if (pmapdebug & PDB_FOLLOW)
75677547 1317 printf("pmap_pageable(%x, %x, %x, %x)",
8a97dd44
WN
1318 pmap, sva, eva, pageable);
1319#endif
1320 /*
1321 * If we are making a PT page pageable then all valid
1322 * mappings must be gone from that page. Hence it should
1323 * be all zeros and there is no need to clean it.
1324 * Assumptions:
1325 * - we are called with only one page at a time
1326 * - PT pages have only one pv_table entry
1327 */
1328 if (pmap == kernel_pmap && pageable && sva + PAGE_SIZE == eva) {
1329 register pv_entry_t pv;
1330 register vm_offset_t pa;
1331
1332#ifdef DEBUG
1333 if ((pmapdebug & (PDB_FOLLOW|PDB_PTPAGE)) == PDB_PTPAGE)
75677547 1334 printf("pmap_pageable(%x, %x, %x, %x)",
8a97dd44
WN
1335 pmap, sva, eva, pageable);
1336#endif
75677547
WN
1337 /*if (!pmap_pde_v(pmap_pde(pmap, sva)))
1338 return;*/
1339 if(pmap_pte(pmap, sva) == 0)
8a97dd44
WN
1340 return;
1341 pa = pmap_pte_pa(pmap_pte(pmap, sva));
1342 if (pa < vm_first_phys || pa >= vm_last_phys)
1343 return;
1344 pv = pa_to_pvh(pa);
75677547
WN
1345 /*if (!ispt(pv->pv_va))
1346 return;*/
8a97dd44
WN
1347#ifdef DEBUG
1348 if (pv->pv_va != sva || pv->pv_next) {
1349 pg("pmap_pageable: bad PT page va %x next %x\n",
1350 pv->pv_va, pv->pv_next);
1351 return;
1352 }
1353#endif
1354 /*
1355 * Mark it unmodified to avoid pageout
1356 */
1357 pmap_clear_modify(pa);
75677547 1358#ifdef needsomethinglikethis
8a97dd44
WN
1359 if (pmapdebug & PDB_PTPAGE)
1360 pg("pmap_pageable: PT page %x(%x) unmodified\n",
1361 sva, *(int *)pmap_pte(pmap, sva));
1362 if (pmapdebug & PDB_WIRING)
1363 pmap_check_wiring("pageable", sva);
1364#endif
1365 }
1366}
1367
1368/*
1369 * Clear the modify bits on the specified physical page.
1370 */
1371
1372void
1373pmap_clear_modify(pa)
1374 vm_offset_t pa;
1375{
1376#ifdef DEBUG
1377 if (pmapdebug & PDB_FOLLOW)
75677547 1378 printf("pmap_clear_modify(%x)", pa);
8a97dd44
WN
1379#endif
1380 pmap_changebit(pa, PG_M, FALSE);
1381}
1382
1383/*
1384 * pmap_clear_reference:
1385 *
1386 * Clear the reference bit on the specified physical page.
1387 */
1388
1389void pmap_clear_reference(pa)
1390 vm_offset_t pa;
1391{
1392#ifdef DEBUG
1393 if (pmapdebug & PDB_FOLLOW)
75677547 1394 printf("pmap_clear_reference(%x)", pa);
8a97dd44
WN
1395#endif
1396 pmap_changebit(pa, PG_U, FALSE);
1397}
1398
1399/*
1400 * pmap_is_referenced:
1401 *
1402 * Return whether or not the specified physical page is referenced
1403 * by any physical maps.
1404 */
1405
1406boolean_t
1407pmap_is_referenced(pa)
1408 vm_offset_t pa;
1409{
1410#ifdef DEBUG
1411 if (pmapdebug & PDB_FOLLOW) {
1412 boolean_t rv = pmap_testbit(pa, PG_U);
75677547 1413 printf("pmap_is_referenced(%x) -> %c", pa, "FT"[rv]);
8a97dd44
WN
1414 return(rv);
1415 }
1416#endif
1417 return(pmap_testbit(pa, PG_U));
1418}
1419
1420/*
1421 * pmap_is_modified:
1422 *
1423 * Return whether or not the specified physical page is modified
1424 * by any physical maps.
1425 */
1426
1427boolean_t
1428pmap_is_modified(pa)
1429 vm_offset_t pa;
1430{
1431#ifdef DEBUG
1432 if (pmapdebug & PDB_FOLLOW) {
1433 boolean_t rv = pmap_testbit(pa, PG_M);
75677547 1434 printf("pmap_is_modified(%x) -> %c", pa, "FT"[rv]);
8a97dd44
WN
1435 return(rv);
1436 }
1437#endif
1438 return(pmap_testbit(pa, PG_M));
1439}
1440
1441vm_offset_t
1442pmap_phys_address(ppn)
1443 int ppn;
1444{
1445 return(i386_ptob(ppn));
1446}
1447
1448/*
1449 * Miscellaneous support routines follow
1450 */
1451
75677547 1452static
8a97dd44
WN
1453i386_protection_init()
1454{
1455 register int *kp, prot;
1456
1457 kp = protection_codes;
1458 for (prot = 0; prot < 8; prot++) {
1459 switch (prot) {
1460 case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
1461 *kp++ = 0;
1462 break;
1463 case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
1464 case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
1465 case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
1466 *kp++ = PG_RO;
1467 break;
1468 case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
1469 case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
1470 case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
1471 case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
1472 *kp++ = PG_RW;
1473 break;
1474 }
1475 }
1476}
1477
75677547 1478static
8a97dd44
WN
1479boolean_t
1480pmap_testbit(pa, bit)
1481 register vm_offset_t pa;
1482 int bit;
1483{
1484 register pv_entry_t pv;
1485 register int *pte, ix;
1486 int s;
1487
1488 if (pa < vm_first_phys || pa >= vm_last_phys)
1489 return(FALSE);
1490
1491 pv = pa_to_pvh(pa);
1492 s = splimp();
1493 /*
1494 * Check saved info first
1495 */
1496 if (pmap_attributes[pa_index(pa)] & bit) {
1497 splx(s);
1498 return(TRUE);
1499 }
1500 /*
1501 * Not found, check current mappings returning
1502 * immediately if found.
1503 */
af359dea 1504 if (pv->pv_pmap != NULL) {
8a97dd44
WN
1505 for (; pv; pv = pv->pv_next) {
1506 pte = (int *) pmap_pte(pv->pv_pmap, pv->pv_va);
1507 ix = 0;
1508 do {
1509 if (*pte++ & bit) {
1510 splx(s);
1511 return(TRUE);
1512 }
1513 } while (++ix != i386pagesperpage);
1514 }
1515 }
1516 splx(s);
1517 return(FALSE);
1518}
1519
75677547 1520static
8a97dd44
WN
1521pmap_changebit(pa, bit, setem)
1522 register vm_offset_t pa;
1523 int bit;
1524 boolean_t setem;
1525{
1526 register pv_entry_t pv;
1527 register int *pte, npte, ix;
1528 vm_offset_t va;
1529 int s;
1530 boolean_t firstpage = TRUE;
1531
1532#ifdef DEBUG
1533 if (pmapdebug & PDB_BITS)
75677547 1534 printf("pmap_changebit(%x, %x, %s)",
8a97dd44
WN
1535 pa, bit, setem ? "set" : "clear");
1536#endif
1537 if (pa < vm_first_phys || pa >= vm_last_phys)
1538 return;
1539
1540 pv = pa_to_pvh(pa);
1541 s = splimp();
1542 /*
1543 * Clear saved attributes (modify, reference)
1544 */
1545 if (!setem)
1546 pmap_attributes[pa_index(pa)] &= ~bit;
1547 /*
 1548 * Loop over all current mappings, setting/clearing as appropriate.
1549 * If setting RO do we need to clear the VAC?
1550 */
af359dea 1551 if (pv->pv_pmap != NULL) {
8a97dd44
WN
1552#ifdef DEBUG
1553 int toflush = 0;
1554#endif
1555 for (; pv; pv = pv->pv_next) {
1556#ifdef DEBUG
1557 toflush |= (pv->pv_pmap == kernel_pmap) ? 2 : 1;
1558#endif
1559 va = pv->pv_va;
af359dea
C
1560
1561 /*
1562 * XXX don't write protect pager mappings
1563 */
1564 if (bit == PG_RO) {
1565 extern vm_offset_t pager_sva, pager_eva;
1566
1567 if (va >= pager_sva && va < pager_eva)
1568 continue;
1569 }
1570
8a97dd44
WN
1571 pte = (int *) pmap_pte(pv->pv_pmap, va);
1572 ix = 0;
1573 do {
1574 if (setem)
1575 npte = *pte | bit;
1576 else
1577 npte = *pte & ~bit;
1578 if (*pte != npte) {
1579 *pte = npte;
1580 /*TBIS(va);*/
1581 }
1582 va += I386_PAGE_SIZE;
1583 pte++;
1584 } while (++ix != i386pagesperpage);
1585
af359dea
C
1586 if (pv->pv_pmap == &curproc->p_vmspace->vm_pmap)
1587 pmap_activate(pv->pv_pmap, (struct pcb *)curproc->p_addr);
8a97dd44 1588 }
75677547 1589#ifdef somethinglikethis
8a97dd44
WN
1590 if (setem && bit == PG_RO && (pmapvacflush & PVF_PROTECT)) {
1591 if ((pmapvacflush & PVF_TOTAL) || toflush == 3)
1592 DCIA();
1593 else if (toflush == 2)
1594 DCIS();
1595 else
1596 DCIU();
1597 }
1598#endif
1599 }
1600 splx(s);
1601}
1602
8a97dd44
WN
1603#ifdef DEBUG
1604pmap_pvdump(pa)
1605 vm_offset_t pa;
1606{
1607 register pv_entry_t pv;
1608
75677547
WN
1609 printf("pa %x", pa);
1610 for (pv = pa_to_pvh(pa); pv; pv = pv->pv_next) {
1611 printf(" -> pmap %x, va %x, flags %x",
1612 pv->pv_pmap, pv->pv_va, pv->pv_flags);
1613 pads(pv->pv_pmap);
1614 }
1615 printf(" ");
8a97dd44
WN
1616}
1617
75677547 1618#ifdef notyet
8a97dd44
WN
1619pmap_check_wiring(str, va)
1620 char *str;
1621 vm_offset_t va;
1622{
1623 vm_map_entry_t entry;
1624 register int count, *pte;
1625
1626 va = trunc_page(va);
1627 if (!pmap_pde_v(pmap_pde(kernel_pmap, va)) ||
1628 !pmap_pte_v(pmap_pte(kernel_pmap, va)))
1629 return;
1630
1631 if (!vm_map_lookup_entry(pt_map, va, &entry)) {
1632 pg("wired_check: entry for %x not found\n", va);
1633 return;
1634 }
1635 count = 0;
1636 for (pte = (int *)va; pte < (int *)(va+PAGE_SIZE); pte++)
1637 if (*pte)
1638 count++;
1639 if (entry->wired_count != count)
1640 pg("*%s*: %x: w%d/a%d\n",
1641 str, va, entry->wired_count, count);
1642}
75677547 1643#endif
8a97dd44
WN
1644
1645/* print address space of pmap*/
1646pads(pm) pmap_t pm; {
1647 unsigned va, i, j;
1648 struct pte *ptep;
1649
75677547 1650 if(pm == kernel_pmap) return;
8a97dd44
WN
1651 for (i = 0; i < 1024; i++)
1652 if(pm->pm_pdir[i].pd_v)
1653 for (j = 0; j < 1024 ; j++) {
1654 va = (i<<22)+(j<<12);
1655 if (pm == kernel_pmap && va < 0xfe000000)
1656 continue;
75677547 1657 if (pm != kernel_pmap && va > UPT_MAX_ADDRESS)
8a97dd44
WN
1658 continue;
1659 ptep = pmap_pte(pm, va);
1660 if(pmap_pte_v(ptep))
1661 printf("%x:%x ", va, *(int *)ptep);
1662 } ;
1663
1664}
1665#endif