convert vm_page bit fields to flags
usr/src/sys/hp300/hp300/pmap.c
8f961915 1/*
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * the Systems Programming Group of the University of Utah Computer
7 * Science Department.
8f961915 8 *
4a4de5a4 9 * %sccs.include.redist.c%
8f961915 10 *
2cbf9af3 11 * @(#)pmap.c 7.13 (Berkeley) %G%
12 */
13
14/*
15 * HP9000/300 series physical map management code.
16 * Supports:
17 * 68020 with HP MMU models 320, 350
18 * 68020 with 68551 MMU models 318, 319, 330 (all untested)
19 * 68030 with on-chip MMU models 340, 360, 370, 345, 375, 400
20 * 68040 with on-chip MMU models 380, 425, 433
8f961915 21 * Don't even pay lip service to multiprocessor support.
4bc66f7c 22 *
9acfa6cd 23 * XXX will only work for PAGE_SIZE == NBPG (i.e. 4096 bytes).
ce01f8ae 24 * Hence, there is no point in defining DYNPGSIZE as it stands now.
25 */
26
27/*
28 * Manages physical address maps.
29 *
30 * In addition to hardware address maps, this
31 * module is called upon to provide software-use-only
32 * maps which may or may not be stored in the same
33 * form as hardware maps. These pseudo-maps are
34 * used to store intermediate results from copy
35 * operations to and from address spaces.
36 *
37 * Since the information managed by this module is
38 * also stored by the logical address mapping module,
39 * this module may throw away valid virtual-to-physical
40 * mappings at almost any time. However, invalidations
41 * of virtual-to-physical mappings must be done as
42 * requested.
43 *
44 * In order to cope with hardware architectures which
45 * make virtual-to-physical map invalidates expensive,
46 * this module may delay invalidate or reduced protection
47 * operations until such time as they are actually
48 * necessary. This module is given full information as
49 * to which processors are currently using which maps,
50 * and to when physical maps must be made correct.
51 */
52
53#include "param.h"
93994beb 54#include "systm.h"
8f961915 55#include "proc.h"
8f961915 56#include "malloc.h"
2059b854 57#include "user.h"
8f961915 58
2059b854 59#include "pte.h"
8f961915 60
61#include "vm/vm.h"
62#include "vm/vm_kern.h"
63#include "vm/vm_page.h"
64
65#include "../include/cpu.h"
8f961915 66
67#ifdef DEBUG
68struct {
69 int collectscans;
70 int collectpages;
71 int kpttotal;
72 int kptinuse;
73 int kptmaxuse;
74} kpt_stats;
75struct {
76 int kernel; /* entering kernel mapping */
77 int user; /* entering user mapping */
78 int ptpneeded; /* needed to allocate a PT page */
79 int pwchange; /* no mapping change, just wiring or protection */
80 int wchange; /* no mapping change, just wiring */
81 int mchange; /* was mapped but mapping to different page */
82 int managed; /* a managed page */
83 int firstpv; /* first mapping for this PA */
84 int secondpv; /* second mapping for this PA */
85 int ci; /* cache inhibited */
86 int unmanaged; /* not a managed page */
87 int flushes; /* cache flushes */
88} enter_stats;
89struct {
90 int calls;
91 int removes;
92 int pvfirst;
93 int pvsearch;
94 int ptinvalid;
95 int uflushes;
96 int sflushes;
97} remove_stats;
98struct {
99 int calls;
100 int pages;
101 int alreadyro;
102 int alreadyrw;
103} protect_stats;
104
105int debugmap = 0;
106int pmapdebug = 0x2000;
107#define PDB_FOLLOW 0x0001
108#define PDB_INIT 0x0002
109#define PDB_ENTER 0x0004
110#define PDB_REMOVE 0x0008
111#define PDB_CREATE 0x0010
112#define PDB_PTPAGE 0x0020
113#define PDB_CACHE 0x0040
114#define PDB_BITS 0x0080
115#define PDB_COLLECT 0x0100
116#define PDB_PROTECT 0x0200
117#define PDB_SEGTAB 0x0400
118#define PDB_PARANOIA 0x2000
119#define PDB_WIRING 0x4000
120#define PDB_PVDUMP 0x8000
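/*
 * Editor's note (illustrative, not part of the original source): pmapdebug
 * is a bitmask of the PDB_* values above, so tracing can be narrowed at
 * run time, e.g. from a kernel debugger:
 *
 *	pmapdebug = PDB_FOLLOW|PDB_ENTER|PDB_REMOVE;
 *
 * The default above (0x2000) enables only the PDB_PARANOIA checks.
 */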
121
122int pmapvacflush = 0;
123#define PVF_ENTER 0x01
124#define PVF_REMOVE 0x02
125#define PVF_PROTECT 0x04
126#define PVF_TOTAL 0x80
4bc66f7c 127
128#if defined(HP380)
129int dowriteback = 1; /* 68040: enable writeback caching */
130int dokwriteback = 1; /* 68040: enable writeback caching of kernel AS */
131#endif
132
4bc66f7c 133extern vm_offset_t pager_sva, pager_eva;
134#endif
135
136/*
137 * Get STEs and PTEs for user/kernel address space
138 */
139#if defined(HP380)
140#define pmap_ste(m, v) \
141 (&((m)->pm_stab[(vm_offset_t)(v) \
142 >> (mmutype == MMU_68040 ? SG4_SHIFT1 : SG_ISHIFT)]))
143#define pmap_ste1(m, v) \
144 (&((m)->pm_stab[(vm_offset_t)(v) >> SG4_SHIFT1]))
145/* XXX assumes physically contiguous ST pages (if more than one) */
146#define pmap_ste2(m, v) \
147 (&((m)->pm_stab[(st_entry_t *)(*(u_int *)pmap_ste1(m, v) & SG4_ADDR1) \
148 - (m)->pm_stpa + (((v) & SG4_MASK2) >> SG4_SHIFT2)]))
149#else
8f961915 150#define pmap_ste(m, v) (&((m)->pm_stab[(vm_offset_t)(v) >> SG_ISHIFT]))
9acfa6cd 151#endif
152#define pmap_pte(m, v) (&((m)->pm_ptab[(vm_offset_t)(v) >> PG_SHIFT]))
153
154#if defined(HP380)
155#define pmap_ste_v(m, v) \
156 (mmutype == MMU_68040 \
157 ? (pmap_ste1(m, v)->sg_v && pmap_ste2(m, v)->sg_v) \
158 : (pmap_ste(m, v)->sg_v))
159#else
160#define pmap_ste_v(m, v) (pmap_ste(m, v)->sg_v)
161#endif
8f961915 162
9acfa6cd 163#define pmap_pte_pa(pte) (*(int *)(pte) & PG_FRAME)
164#define pmap_pte_w(pte) ((pte)->pg_w)
165#define pmap_pte_ci(pte) ((pte)->pg_ci)
166#define pmap_pte_m(pte) ((pte)->pg_m)
167#define pmap_pte_u(pte) ((pte)->pg_u)
9acfa6cd 168#define pmap_pte_prot(pte) ((pte)->pg_prot)
169#define pmap_pte_v(pte) ((pte)->pg_v)
170#define pmap_pte_set_w(pte, v) ((pte)->pg_w = (v))
171#define pmap_pte_set_prot(pte, v) ((pte)->pg_prot = (v))
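/*
 * Editor's sketch (illustrative, not part of the original source): with
 * the non-68040 layout above, looking up virtual address va in pmap m
 * expands roughly to
 *
 *	st_entry_t *ste = &m->pm_stab[va >> SG_ISHIFT];    (segment entry)
 *	pt_entry_t *pte = &m->pm_ptab[va >> PG_SHIFT];     (page entry)
 *
 * i.e. pm_ptab is indexed by the page number of the full VA while pm_stab
 * is indexed by the segment number; the 68040 case adds a second level
 * via pmap_ste1()/pmap_ste2().
 */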
172
173/*
174 * Given a map and a machine independent protection code,
 175 * convert to an hp300 protection code.
176 */
177#define pte_prot(m, p) (protection_codes[p])
178int protection_codes[8];
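/*
 * Editor's note (illustrative): protection_codes[] is indexed by the
 * 3-bit machine-independent protection value, so a call such as
 *
 *	pte_prot(pmap, VM_PROT_READ|VM_PROT_EXECUTE)
 *
 * simply returns the PG_RO/PG_RW bits recorded for that combination.
 * The table itself is presumably filled in during bootstrap (not shown
 * in this file).
 */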
179
180/*
181 * Kernel page table page management.
182 */
183struct kpt_page {
184 struct kpt_page *kpt_next; /* link on either used or free list */
185 vm_offset_t kpt_va; /* always valid kernel VA */
186 vm_offset_t kpt_pa; /* PA of this page (for speed) */
187};
188struct kpt_page *kpt_free_list, *kpt_used_list;
189struct kpt_page *kpt_pages;
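/*
 * Editor's note (illustrative, not part of the original source): KPT
 * pages live on simple singly-linked lists.  A minimal "move a page from
 * the free list to the used list" step, in the style of the code that
 * uses these lists below, would look like:
 *
 *	struct kpt_page *kpt;
 *	if ((kpt = kpt_free_list) != NULL) {
 *		kpt_free_list = kpt->kpt_next;
 *		kpt->kpt_next = kpt_used_list;
 *		kpt_used_list = kpt;
 *	}
 *
 * The kernel-PT path of pmap_enter_ptpage() draws from this pool, and
 * pmap_collect() (below) performs the reverse move.
 */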
190
191/*
192 * Kernel segment/page table and page table map.
193 * The page table map gives us a level of indirection we need to dynamically
194 * expand the page table. It is essentially a copy of the segment table
195 * with PTEs instead of STEs. All are initialized in locore at boot time.
196 * Sysmap will initially contain VM_KERNEL_PT_PAGES pages of PTEs.
 197 * Segtabzero is an empty segment table which all processes share until they
198 * reference something.
199 */
200st_entry_t *Sysseg;
201pt_entry_t *Sysmap, *Sysptmap;
9acfa6cd 202st_entry_t *Segtabzero, *Segtabzeropa;
8f961915 203vm_size_t Sysptsize = VM_KERNEL_PT_PAGES;
204
205struct pmap kernel_pmap_store;
206vm_map_t pt_map;
207
208vm_offset_t avail_start; /* PA of first available physical page */
209vm_offset_t avail_end; /* PA of last available physical page */
210vm_size_t mem_size; /* memory size in bytes */
211vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss)*/
212vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
213vm_offset_t vm_first_phys; /* PA of first managed page */
214vm_offset_t vm_last_phys; /* PA just past last managed page */
9acfa6cd 215#if defined(DYNPGSIZE)
8f961915 216int hppagesperpage; /* PAGE_SIZE / HP_PAGE_SIZE */
9acfa6cd 217#endif
218boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
 219int pmap_aliasmask; /* separation at which VA aliasing ok */
220char *pmap_attributes; /* reference and modify bits */
221#if defined(HP380)
222int protostfree;
223#endif
224
225boolean_t pmap_testbit();
226void pmap_enter_ptpage();
227
228/*
229 * Bootstrap memory allocator. This function allows for early dynamic
230 * memory allocation until the virtual memory system has been bootstrapped.
231 * After that point, either kmem_alloc or malloc should be used. This
232 * function works by stealing pages from the (to be) managed page pool,
233 * stealing virtual address space, then mapping the pages and zeroing them.
234 *
235 * It should be used from pmap_bootstrap till vm_page_startup, afterwards
236 * it cannot be used, and will generate a panic if tried. Note that this
237 * memory will never be freed, and in essence it is wired down.
238 */
239void *
240pmap_bootstrap_alloc(size) {
241 vm_offset_t val;
242 int i;
243 extern boolean_t vm_page_startup_initialized;
244
245 if (vm_page_startup_initialized)
246 panic("pmap_bootstrap_alloc: called after startup initialized");
247 size = round_page(size);
248 val = virtual_avail;
249
250 virtual_avail = pmap_map(virtual_avail, avail_start,
251 avail_start + size, VM_PROT_READ|VM_PROT_WRITE);
252 avail_start += size;
253
254 blkclr ((caddr_t) val, size);
255 return ((void *) val);
256}
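/*
 * Editor's note (illustrative, hypothetical caller): early machine-
 * dependent code can grab wired, zeroed memory with, e.g.,
 *
 *	buf = (caddr_t)pmap_bootstrap_alloc(nbytes);
 *
 * at any point after pmap_bootstrap() and before vm_page_startup(); the
 * space is stolen from the managed page pool and is never freed.  "buf"
 * and "nbytes" above are placeholders, not names used elsewhere here.
 */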
257
258/*
259 * Initialize the pmap module.
260 * Called by vm_init, to initialize any structures that the pmap
261 * system needs to map virtual memory.
262 */
263void
264pmap_init(phys_start, phys_end)
265 vm_offset_t phys_start, phys_end;
266{
267 vm_offset_t addr, addr2;
268 vm_size_t npg, s;
269 int rv;
a1af79ae 270 extern char kstack[];
271
272#ifdef DEBUG
273 if (pmapdebug & PDB_FOLLOW)
274 printf("pmap_init(%x, %x)\n", phys_start, phys_end);
275#endif
276 /*
277 * Now that kernel map has been allocated, we can mark as
278 * unavailable regions which we have mapped in locore.
279 */
4bc66f7c 280 addr = (vm_offset_t) intiobase;
2059b854 281 (void) vm_map_find(kernel_map, NULL, (vm_offset_t) 0,
282 &addr, hp300_ptob(IIOMAPSIZE+EIOMAPSIZE), FALSE);
283 if (addr != (vm_offset_t)intiobase)
284 goto bogons;
285 addr = (vm_offset_t) Sysmap;
286 vm_object_reference(kernel_object);
287 (void) vm_map_find(kernel_map, kernel_object, addr,
288 &addr, HP_MAX_PTSIZE, FALSE);
289 /*
290 * If this fails it is probably because the static portion of
291 * the kernel page table isn't big enough and we overran the
292 * page table map. Need to adjust pmap_size() in hp300_init.c.
293 */
294 if (addr != (vm_offset_t)Sysmap)
295 goto bogons;
296
a1af79ae 297 addr = (vm_offset_t) kstack;
298 vm_object_reference(kernel_object);
299 (void) vm_map_find(kernel_map, kernel_object, addr,
300 &addr, hp300_ptob(UPAGES), FALSE);
a1af79ae 301 if (addr != (vm_offset_t)kstack)
302bogons:
303 panic("pmap_init: bogons in the VM system!\n");
304
305#ifdef DEBUG
306 if (pmapdebug & PDB_INIT) {
307 printf("pmap_init: Sysseg %x, Sysmap %x, Sysptmap %x\n",
308 Sysseg, Sysmap, Sysptmap);
309 printf(" pstart %x, pend %x, vstart %x, vend %x\n",
310 avail_start, avail_end, virtual_avail, virtual_end);
311 }
312#endif
313
314 /*
315 * Allocate memory for random pmap data structures. Includes the
316 * initial segment table, pv_head_table and pmap_attributes.
317 */
318 npg = atop(phys_end - phys_start);
319 s = (vm_size_t) (HP_STSIZE + sizeof(struct pv_entry) * npg + npg);
320 s = round_page(s);
321 addr = (vm_offset_t) kmem_alloc(kernel_map, s);
322 Segtabzero = (st_entry_t *) addr;
9acfa6cd 323 Segtabzeropa = (st_entry_t *) pmap_extract(kernel_pmap, addr);
324 addr += HP_STSIZE;
325 pv_table = (pv_entry_t) addr;
326 addr += sizeof(struct pv_entry) * npg;
327 pmap_attributes = (char *) addr;
328#ifdef DEBUG
329 if (pmapdebug & PDB_INIT)
330 printf("pmap_init: %x bytes: npg %x s0 %x(%x) tbl %x atr %x\n",
331 s, npg, Segtabzero, Segtabzeropa,
332 pv_table, pmap_attributes);
333#endif
334
335 /*
336 * Allocate physical memory for kernel PT pages and their management.
337 * We need 1 PT page per possible task plus some slop.
338 */
2059b854 339 npg = min(atop(HP_MAX_KPTSIZE), maxproc+16);
340 s = ptoa(npg) + round_page(npg * sizeof(struct kpt_page));
341
342 /*
343 * Verify that space will be allocated in region for which
344 * we already have kernel PT pages.
345 */
346 addr = 0;
2059b854 347 rv = vm_map_find(kernel_map, NULL, 0, &addr, s, TRUE);
348 if (rv != KERN_SUCCESS || addr + s >= (vm_offset_t)Sysmap)
349 panic("pmap_init: kernel PT too small");
350 vm_map_remove(kernel_map, addr, addr + s);
351
352 /*
353 * Now allocate the space and link the pages together to
354 * form the KPT free list.
355 */
356 addr = (vm_offset_t) kmem_alloc(kernel_map, s);
357 s = ptoa(npg);
358 addr2 = addr + s;
359 kpt_pages = &((struct kpt_page *)addr2)[npg];
360 kpt_free_list = (struct kpt_page *) 0;
361 do {
362 addr2 -= HP_PAGE_SIZE;
363 (--kpt_pages)->kpt_next = kpt_free_list;
364 kpt_free_list = kpt_pages;
365 kpt_pages->kpt_va = addr2;
366 kpt_pages->kpt_pa = pmap_extract(kernel_pmap, addr2);
367 } while (addr != addr2);
368#ifdef DEBUG
369 kpt_stats.kpttotal = atop(s);
370 if (pmapdebug & PDB_INIT)
371 printf("pmap_init: KPT: %d pages from %x to %x\n",
372 atop(s), addr, addr + s);
373#endif
374
375 /*
376 * Slightly modified version of kmem_suballoc() to get page table
377 * map where we want it.
378 */
379 addr = HP_PTBASE;
2059b854 380 s = min(HP_PTMAXSIZE, maxproc*HP_MAX_PTSIZE);
8f961915 381 addr2 = addr + s;
2059b854 382 rv = vm_map_find(kernel_map, NULL, 0, &addr, s, TRUE);
383 if (rv != KERN_SUCCESS)
384 panic("pmap_init: cannot allocate space for PT map");
385 pmap_reference(vm_map_pmap(kernel_map));
386 pt_map = vm_map_create(vm_map_pmap(kernel_map), addr, addr2, TRUE);
2059b854 387 if (pt_map == NULL)
388 panic("pmap_init: cannot create pt_map");
389 rv = vm_map_submap(kernel_map, addr, addr2, pt_map);
390 if (rv != KERN_SUCCESS)
391 panic("pmap_init: cannot map range to pt_map");
392#ifdef DEBUG
393 if (pmapdebug & PDB_INIT)
394 printf("pmap_init: pt_map [%x - %x)\n", addr, addr2);
395#endif
396
397#if defined(HP380)
398 if (mmutype == MMU_68040) {
399 protostfree = ~l2tobm(0);
400 for (rv = MAXUL2SIZE; rv < sizeof(protostfree)*NBBY; rv++)
401 protostfree &= ~l2tobm(rv);
402 }
403#endif
404
405 /*
406 * Now it is safe to enable pv_table recording.
407 */
408 vm_first_phys = phys_start;
409 vm_last_phys = phys_end;
410 pmap_initialized = TRUE;
411}
412
413/*
414 * Used to map a range of physical addresses into kernel
415 * virtual address space.
416 *
417 * For now, VM is already on, we only need to map the
418 * specified memory.
419 */
420vm_offset_t
421pmap_map(virt, start, end, prot)
422 vm_offset_t virt;
423 vm_offset_t start;
424 vm_offset_t end;
425 int prot;
426{
427#ifdef DEBUG
428 if (pmapdebug & PDB_FOLLOW)
429 printf("pmap_map(%x, %x, %x, %x)\n", virt, start, end, prot);
430#endif
431 while (start < end) {
432 pmap_enter(kernel_pmap, virt, start, prot, FALSE);
433 virt += PAGE_SIZE;
434 start += PAGE_SIZE;
435 }
436 return(virt);
437}
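/*
 * Editor's note (illustrative): because pmap_map() returns the first VA
 * past the new mapping, callers can chain calls to carve up a region of
 * kernel VA, as pmap_bootstrap_alloc() does above:
 *
 *	va = pmap_map(va, pa, pa + size, VM_PROT_READ|VM_PROT_WRITE);
 *
 * where va, pa and size are the caller's running cursor and extent.
 */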
438
439/*
440 * Create and return a physical map.
441 *
442 * If the size specified for the map
443 * is zero, the map is an actual physical
444 * map, and may be referenced by the
445 * hardware.
446 *
447 * If the size specified is non-zero,
448 * the map will be used in software only, and
449 * is bounded by that size.
450 */
451pmap_t
452pmap_create(size)
453 vm_size_t size;
454{
455 register pmap_t pmap;
456
457#ifdef DEBUG
458 if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
459 printf("pmap_create(%x)\n", size);
460#endif
461 /*
462 * Software use map does not need a pmap
463 */
464 if (size)
2059b854 465 return(NULL);
466
467 /* XXX: is it ok to wait here? */
468 pmap = (pmap_t) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK);
469#ifdef notifwewait
470 if (pmap == NULL)
8f961915 471 panic("pmap_create: cannot allocate a pmap");
472#endif
473 bzero(pmap, sizeof(*pmap));
474 pmap_pinit(pmap);
475 return (pmap);
476}
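/*
 * Editor's note (illustrative): the size argument follows the old Mach
 * pmap interface; a caller wanting a real, hardware-loadable pmap passes
 * zero,
 *
 *	pmap_t pm = pmap_create(0);
 *
 * while any non-zero size denotes a software-only map, for which this
 * implementation simply returns NULL (see the check above).
 */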
8f961915 477
478/*
479 * Initialize a preallocated and zeroed pmap structure,
480 * such as one in a vmspace structure.
481 */
482void
483pmap_pinit(pmap)
484 register struct pmap *pmap;
485{
486
487#ifdef DEBUG
488 if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
489 printf("pmap_pinit(%x)\n", pmap);
490#endif
491 /*
492 * No need to allocate page table space yet but we do need a
493 * valid segment table. Initially, we point everyone at the
494 * "null" segment table. On the first pmap_enter, a real
495 * segment table will be allocated.
496 */
8f961915 497 pmap->pm_stab = Segtabzero;
498 pmap->pm_stpa = Segtabzeropa;
499#if defined(HP380)
500 if (mmutype == MMU_68040)
501 pmap->pm_stfree = protostfree;
502#endif
8f961915 503 pmap->pm_stchanged = TRUE;
504 pmap->pm_count = 1;
505 simple_lock_init(&pmap->pm_lock);
506}
507
508/*
509 * Retire the given physical map from service.
510 * Should only be called if the map contains
511 * no valid mappings.
512 */
513void
514pmap_destroy(pmap)
515 register pmap_t pmap;
516{
517 int count;
518
519#ifdef DEBUG
520 if (pmapdebug & PDB_FOLLOW)
521 printf("pmap_destroy(%x)\n", pmap);
522#endif
2059b854 523 if (pmap == NULL)
524 return;
525
526 simple_lock(&pmap->pm_lock);
527 count = --pmap->pm_count;
528 simple_unlock(&pmap->pm_lock);
529 if (count == 0) {
530 pmap_release(pmap);
531 free((caddr_t)pmap, M_VMPMAP);
532 }
533}
8f961915 534
535/*
536 * Release any resources held by the given physical map.
537 * Called when a pmap initialized by pmap_pinit is being released.
538 * Should only be called if the map contains no valid mappings.
539 */
540void
541pmap_release(pmap)
542 register struct pmap *pmap;
543{
544
545#ifdef DEBUG
546 if (pmapdebug & PDB_FOLLOW)
547 printf("pmap_release(%x)\n", pmap);
548#endif
549#ifdef notdef /* DIAGNOSTIC */
550 /* count would be 0 from pmap_destroy... */
551 simple_lock(&pmap->pm_lock);
552 if (pmap->pm_count != 1)
553 panic("pmap_release count");
554#endif
555 if (pmap->pm_ptab)
556 kmem_free_wakeup(pt_map, (vm_offset_t)pmap->pm_ptab,
557 HP_MAX_PTSIZE);
558 if (pmap->pm_stab != Segtabzero)
559 kmem_free(kernel_map, (vm_offset_t)pmap->pm_stab, HP_STSIZE);
560}
561
562/*
563 * Add a reference to the specified pmap.
564 */
565void
566pmap_reference(pmap)
567 pmap_t pmap;
568{
569#ifdef DEBUG
570 if (pmapdebug & PDB_FOLLOW)
571 printf("pmap_reference(%x)\n", pmap);
572#endif
2059b854 573 if (pmap != NULL) {
574 simple_lock(&pmap->pm_lock);
575 pmap->pm_count++;
576 simple_unlock(&pmap->pm_lock);
577 }
578}
579
580/*
581 * Remove the given range of addresses from the specified map.
582 *
583 * It is assumed that the start and end are properly
584 * rounded to the page size.
585 */
586void
587pmap_remove(pmap, sva, eva)
588 register pmap_t pmap;
589 vm_offset_t sva, eva;
590{
591 register vm_offset_t pa, va;
592 register pt_entry_t *pte;
593 register pv_entry_t pv, npv;
594 pmap_t ptpmap;
595 int *ste, s, bits;
596 boolean_t firstpage = TRUE;
597 boolean_t flushcache = FALSE;
598#ifdef DEBUG
599 pt_entry_t opte;
600
601 if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
602 printf("pmap_remove(%x, %x, %x)\n", pmap, sva, eva);
603#endif
604
2059b854 605 if (pmap == NULL)
606 return;
607
608#ifdef DEBUG
609 remove_stats.calls++;
610#endif
611 for (va = sva; va < eva; va += PAGE_SIZE) {
612 /*
613 * Weed out invalid mappings.
614 * Note: we assume that the segment table is always allocated.
615 */
9acfa6cd 616 if (!pmap_ste_v(pmap, va)) {
8f961915 617 /* XXX: avoid address wrap around */
9acfa6cd 618 if (va >= hp300_trunc_seg(VM_MAX_ADDRESS))
619 break;
620 va = hp300_round_seg(va + PAGE_SIZE) - PAGE_SIZE;
621 continue;
622 }
623 pte = pmap_pte(pmap, va);
624 pa = pmap_pte_pa(pte);
625 if (pa == 0)
626 continue;
627 /*
628 * Invalidating a non-CI page, must flush external VAC
629 * unless it is a supervisor mapping and we have already
630 * flushed the supervisor side.
631 */
632 if (pmap_aliasmask && !pmap_pte_ci(pte) &&
633 !(pmap == kernel_pmap && firstpage))
634 flushcache = TRUE;
635#ifdef DEBUG
636 opte = *pte;
637 remove_stats.removes++;
638#endif
639 /*
640 * Update statistics
641 */
642 if (pmap_pte_w(pte))
643 pmap->pm_stats.wired_count--;
644 pmap->pm_stats.resident_count--;
645
646 /*
647 * Invalidate the PTEs.
648 * XXX: should cluster them up and invalidate as many
649 * as possible at once.
650 */
651#ifdef DEBUG
652 if (pmapdebug & PDB_REMOVE)
9acfa6cd 653 printf("remove: invalidating pte at %x\n", pte);
654#endif
655 /*
656 * Flush VAC to ensure we get the correct state of any
657 * hardware maintained bits.
658 */
659 if (firstpage && pmap_aliasmask) {
660 firstpage = FALSE;
661 if (pmap == kernel_pmap)
662 flushcache = FALSE;
663 DCIS();
664#ifdef DEBUG
665 remove_stats.sflushes++;
666#endif
667 }
668#if defined(DYNPGSIZE)
669 {
670 register int ix = 0;
8f961915 671
672 bits = 0;
673 do {
674 bits |= *(int *)pte & (PG_U|PG_M);
675 *(int *)pte++ = PG_NV;
676 TBIS(va + ix * HP_PAGE_SIZE);
677 } while (++ix != hppagesperpage);
678 }
679#else
680 bits = *(int *)pte & (PG_U|PG_M);
681 *(int *)pte = PG_NV;
682 TBIS(va);
683#endif
684 /*
685 * For user mappings decrement the wiring count on
686 * the PT page. We do this after the PTE has been
687 * invalidated because vm_map_pageable winds up in
688 * pmap_pageable which clears the modify bit for the
689 * PT page.
690 */
691 if (pmap != kernel_pmap) {
692 pte = pmap_pte(pmap, va);
693 vm_map_pageable(pt_map, trunc_page(pte),
694 round_page(pte+1), TRUE);
695#ifdef DEBUG
696 if (pmapdebug & PDB_WIRING)
697 pmap_check_wiring("remove", trunc_page(pte));
698#endif
699 }
700 /*
701 * Remove from the PV table (raise IPL since we
702 * may be called at interrupt time).
703 */
704 if (pa < vm_first_phys || pa >= vm_last_phys)
705 continue;
706 pv = pa_to_pvh(pa);
707 ste = (int *)0;
708 s = splimp();
709 /*
710 * If it is the first entry on the list, it is actually
711 * in the header and we must copy the following entry up
712 * to the header. Otherwise we must search the list for
713 * the entry. In either case we free the now unused entry.
714 */
715 if (pmap == pv->pv_pmap && va == pv->pv_va) {
716 ste = (int *)pv->pv_ptste;
717 ptpmap = pv->pv_ptpmap;
718 npv = pv->pv_next;
719 if (npv) {
720 *pv = *npv;
721 free((caddr_t)npv, M_VMPVENT);
722 } else
2059b854 723 pv->pv_pmap = NULL;
724#ifdef DEBUG
725 remove_stats.pvfirst++;
726#endif
727 } else {
728 for (npv = pv->pv_next; npv; npv = npv->pv_next) {
729#ifdef DEBUG
730 remove_stats.pvsearch++;
731#endif
732 if (pmap == npv->pv_pmap && va == npv->pv_va)
733 break;
734 pv = npv;
735 }
736#ifdef DEBUG
2059b854 737 if (npv == NULL)
738 panic("pmap_remove: PA not in pv_tab");
739#endif
740 ste = (int *)npv->pv_ptste;
741 ptpmap = npv->pv_ptpmap;
742 pv->pv_next = npv->pv_next;
743 free((caddr_t)npv, M_VMPVENT);
744 pv = pa_to_pvh(pa);
745 }
746 /*
747 * If only one mapping left we no longer need to cache inhibit
748 */
749 if (pv->pv_pmap &&
2059b854 750 pv->pv_next == NULL && (pv->pv_flags & PV_CI)) {
751#ifdef DEBUG
752 if (pmapdebug & PDB_CACHE)
753 printf("remove: clearing CI for pa %x\n", pa);
754#endif
755 pv->pv_flags &= ~PV_CI;
756 pmap_changebit(pa, PG_CI, FALSE);
757#ifdef DEBUG
758 if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) ==
759 (PDB_CACHE|PDB_PVDUMP))
760 pmap_pvdump(pa);
761#endif
762 }
763
764 /*
765 * If this was a PT page we must also remove the
766 * mapping from the associated segment table.
767 */
768 if (ste) {
769#ifdef DEBUG
770 remove_stats.ptinvalid++;
771 if (pmapdebug & (PDB_REMOVE|PDB_PTPAGE)) {
772 printf("remove: ste was %x@%x pte was %x@%x\n",
773 *ste, ste,
774 *(int *)&opte, pmap_pte(pmap, va));
775 }
776#endif
777#if defined(HP380)
778 if (mmutype == MMU_68040) {
779 int *este = &ste[NPTEPG/SG4_LEV3SIZE];
780
781 while (ste < este)
782 *ste++ = SG_NV;
783 } else
784#endif
785 *ste = SG_NV;
786 /*
787 * If it was a user PT page, we decrement the
788 * reference count on the segment table as well,
789 * freeing it if it is now empty.
790 */
791 if (ptpmap != kernel_pmap) {
792#ifdef DEBUG
793 if (pmapdebug & (PDB_REMOVE|PDB_SEGTAB))
794 printf("remove: stab %x, refcnt %d\n",
795 ptpmap->pm_stab,
796 ptpmap->pm_sref - 1);
797 if ((pmapdebug & PDB_PARANOIA) &&
2059b854 798 ptpmap->pm_stab != (st_entry_t *)trunc_page(ste))
799 panic("remove: bogus ste");
800#endif
801 if (--(ptpmap->pm_sref) == 0) {
802#ifdef DEBUG
803 if (pmapdebug&(PDB_REMOVE|PDB_SEGTAB))
804 printf("remove: free stab %x\n",
805 ptpmap->pm_stab);
806#endif
807 kmem_free(kernel_map,
808 (vm_offset_t)ptpmap->pm_stab,
809 HP_STSIZE);
810 ptpmap->pm_stab = Segtabzero;
811 ptpmap->pm_stpa = Segtabzeropa;
812#if defined(HP380)
813 if (mmutype == MMU_68040)
814 ptpmap->pm_stfree = protostfree;
815#endif
816 ptpmap->pm_stchanged = TRUE;
817 /*
818 * XXX may have changed segment table
819 * pointer for current process so
820 * update now to reload hardware.
821 */
2059b854 822 if (ptpmap == curproc->p_vmspace->vm_map.pmap)
8f961915 823 PMAP_ACTIVATE(ptpmap,
2059b854 824 (struct pcb *)curproc->p_addr, 1);
825 }
826 }
827 if (ptpmap == kernel_pmap)
828 TBIAS();
829 else
830 TBIAU();
831 pv->pv_flags &= ~PV_PTPAGE;
832 ptpmap->pm_ptpages--;
833 }
834 /*
835 * Update saved attributes for managed page
836 */
837 pmap_attributes[pa_index(pa)] |= bits;
838 splx(s);
839 }
840#ifdef DEBUG
841 if (pmapvacflush & PVF_REMOVE) {
842 if (pmapvacflush & PVF_TOTAL)
843 DCIA();
844 else if (pmap == kernel_pmap)
845 DCIS();
846 else
847 DCIU();
848 }
849#endif
850 if (flushcache) {
851 if (pmap == kernel_pmap) {
852 DCIS();
853#ifdef DEBUG
854 remove_stats.sflushes++;
855#endif
856 } else {
857 DCIU();
858#ifdef DEBUG
859 remove_stats.uflushes++;
860#endif
861 }
862 }
863}
864
865/*
866 * pmap_page_protect:
867 *
868 * Lower the permission for all mappings to a given page.
869 */
870void
871pmap_page_protect(pa, prot)
872 vm_offset_t pa;
873 vm_prot_t prot;
874{
875 register pv_entry_t pv;
876 int s;
877
878#ifdef DEBUG
879 if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) ||
880 prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE))
881 printf("pmap_page_protect(%x, %x)\n", pa, prot);
8f961915 882#endif
883 if (pa < vm_first_phys || pa >= vm_last_phys)
884 return;
885
886 switch (prot) {
887 case VM_PROT_ALL:
888 break;
889 /* copy_on_write */
890 case VM_PROT_READ:
891 case VM_PROT_READ|VM_PROT_EXECUTE:
892 pmap_changebit(pa, PG_RO, TRUE);
893 break;
894 /* remove_all */
895 default:
896 pv = pa_to_pvh(pa);
897 s = splimp();
898 while (pv->pv_pmap != NULL) {
8f961915 899#ifdef DEBUG
9acfa6cd 900 if (!pmap_ste_v(pv->pv_pmap, pv->pv_va) ||
901 pmap_pte_pa(pmap_pte(pv->pv_pmap,pv->pv_va)) != pa)
902 panic("pmap_page_protect: bad mapping");
8f961915 903#endif
904 pmap_remove(pv->pv_pmap, pv->pv_va,
905 pv->pv_va + PAGE_SIZE);
906 }
907 splx(s);
908 break;
8f961915 909 }
910}
911
912/*
913 * Set the physical protection on the
914 * specified range of this map as requested.
915 */
916void
917pmap_protect(pmap, sva, eva, prot)
918 register pmap_t pmap;
919 vm_offset_t sva, eva;
920 vm_prot_t prot;
921{
922 register pt_entry_t *pte;
923 register vm_offset_t va;
924 int hpprot;
925 boolean_t firstpage = TRUE;
926
927#ifdef DEBUG
928 if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
929 printf("pmap_protect(%x, %x, %x, %x)\n", pmap, sva, eva, prot);
9acfa6cd 930 protect_stats.calls++;
8f961915 931#endif
2059b854 932 if (pmap == NULL)
933 return;
934
935 if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
936 pmap_remove(pmap, sva, eva);
937 return;
938 }
939 if (prot & VM_PROT_WRITE)
940 return;
941
942 pte = pmap_pte(pmap, sva);
943 hpprot = pte_prot(pmap, prot) == PG_RO ? 1 : 0;
944 for (va = sva; va < eva; va += PAGE_SIZE) {
945 /*
946 * Page table page is not allocated.
947 * Skip it, we don't want to force allocation
948 * of unnecessary PTE pages just to set the protection.
949 */
9acfa6cd 950 if (!pmap_ste_v(pmap, va)) {
951 /* XXX: avoid address wrap around */
952 if (va >= hp300_trunc_seg((vm_offset_t)-1))
953 break;
954 va = hp300_round_seg(va + PAGE_SIZE) - PAGE_SIZE;
955#if defined(DYNPGSIZE)
956 pte = pmap_pte(pmap, va) + hppagesperpage;
957#else
958 pte = pmap_pte(pmap, va) + 1;
959#endif
960 continue;
961 }
962 /*
963 * Page not valid. Again, skip it.
964 * Should we do this? Or set protection anyway?
965 */
966 if (!pmap_pte_v(pte)) {
9acfa6cd 967#if defined(DYNPGSIZE)
8f961915 968 pte += hppagesperpage;
969#else
970 pte++;
971#endif
972 continue;
973 }
974 /*
975 * Purge kernel side of VAC to ensure we get correct state
976 * of HW bits so we don't clobber them.
977 */
978 if (firstpage && pmap_aliasmask) {
979 firstpage = FALSE;
980 DCIS();
981 }
982#if defined(DYNPGSIZE)
983 {
984 register int ix = 0;
985
986 do {
987 /*
988 * Clear caches as necessary if making RO.
989 * XXX clear VAC? Doesn't seem to be needed.
990 */
991#if defined(HP380)
992 if (hpprot && !pmap_pte_prot(pte)) {
993 vm_offset_t pa = pmap_pte_pa(pte);
994
995 if (mmutype == MMU_68040) {
996 DCFP(pa);
997 ICPP(pa);
998 }
999 }
1000#endif
1001#ifdef DEBUG
1002 protect_stats.pages++;
1003 if (hpprot && pmap_pte_prot(pte))
1004 protect_stats.alreadyro++;
1005 if (!hpprot && !pmap_pte_prot(pte))
1006 protect_stats.alreadyrw++;
1007#endif
1008 pmap_pte_set_prot(pte++, hpprot);
1009 TBIS(va + ix * HP_PAGE_SIZE);
1010 } while (++ix != hppagesperpage);
1011 }
1012#else
1013 /*
1014 * Clear caches as necessary if making RO.
1015 * XXX clear VAC? Doesn't seem to be needed.
1016 */
1017#if defined(HP380)
1018 if (hpprot && !pmap_pte_prot(pte)) {
1019 vm_offset_t pa = pmap_pte_pa(pte);
1020
1021 if (mmutype == MMU_68040) {
1022 DCFP(pa);
1023 ICPP(pa);
1024 }
1025 }
1026#endif
1027#ifdef DEBUG
1028 protect_stats.pages++;
1029 if (hpprot && pmap_pte_prot(pte))
1030 protect_stats.alreadyro++;
1031 if (!hpprot && !pmap_pte_prot(pte))
1032 protect_stats.alreadyrw++;
1033#endif
1034 pmap_pte_set_prot(pte++, hpprot);
1035 TBIS(va);
1036#endif
1037 }
1038#ifdef DEBUG
1039 if (hpprot && (pmapvacflush & PVF_PROTECT)) {
1040 if (pmapvacflush & PVF_TOTAL)
1041 DCIA();
1042 else if (pmap == kernel_pmap)
1043 DCIS();
1044 else
1045 DCIU();
1046 }
1047#endif
1048}
1049
1050/*
1051 * Insert the given physical page (p) at
1052 * the specified virtual address (v) in the
1053 * target physical map with the protection requested.
1054 *
1055 * If specified, the page will be wired down, meaning
1056 * that the related pte can not be reclaimed.
1057 *
1058 * NB: This is the only routine which MAY NOT lazy-evaluate
1059 * or lose information. That is, this routine must actually
1060 * insert this page into the given map NOW.
1061 */
1062void
1063pmap_enter(pmap, va, pa, prot, wired)
1064 register pmap_t pmap;
1065 vm_offset_t va;
1066 register vm_offset_t pa;
1067 vm_prot_t prot;
1068 boolean_t wired;
1069{
1070 register pt_entry_t *pte;
9acfa6cd 1071 register int npte;
1072 vm_offset_t opa;
1073 boolean_t cacheable = TRUE;
1074 boolean_t checkpv = TRUE;
1075
1076#ifdef DEBUG
1077 if (pmapdebug & (PDB_FOLLOW|PDB_ENTER))
1078 printf("pmap_enter(%x, %x, %x, %x, %x)\n",
1079 pmap, va, pa, prot, wired);
1080#endif
2059b854 1081 if (pmap == NULL)
1082 return;
1083
1084#ifdef DEBUG
1085 if (pmap == kernel_pmap)
1086 enter_stats.kernel++;
1087 else
1088 enter_stats.user++;
1089#endif
1090 /*
1091 * For user mapping, allocate kernel VM resources if necessary.
1092 */
2059b854 1093 if (pmap->pm_ptab == NULL)
1094 pmap->pm_ptab = (pt_entry_t *)
1095 kmem_alloc_wait(pt_map, HP_MAX_PTSIZE);
1096
1097 /*
1098 * Segment table entry not valid, we need a new PT page
1099 */
9acfa6cd 1100 if (!pmap_ste_v(pmap, va))
1101 pmap_enter_ptpage(pmap, va);
1102
1103 pte = pmap_pte(pmap, va);
1104 opa = pmap_pte_pa(pte);
1105#ifdef DEBUG
1106 if (pmapdebug & PDB_ENTER)
1107 printf("enter: pte %x, *pte %x\n", pte, *(int *)pte);
1108#endif
1109
1110 /*
1111 * Mapping has not changed, must be protection or wiring change.
1112 */
1113 if (opa == pa) {
1114#ifdef DEBUG
1115 enter_stats.pwchange++;
1116#endif
1117 /*
1118 * Wiring change, just update stats.
1119 * We don't worry about wiring PT pages as they remain
1120 * resident as long as there are valid mappings in them.
1121 * Hence, if a user page is wired, the PT page will be also.
1122 */
1123 if (wired && !pmap_pte_w(pte) || !wired && pmap_pte_w(pte)) {
1124#ifdef DEBUG
1125 if (pmapdebug & PDB_ENTER)
1126 printf("enter: wiring change -> %x\n", wired);
1127#endif
1128 if (wired)
1129 pmap->pm_stats.wired_count++;
1130 else
1131 pmap->pm_stats.wired_count--;
1132#ifdef DEBUG
1133 enter_stats.wchange++;
1134#endif
1135 }
1136 /*
1137 * Retain cache inhibition status
1138 */
1139 checkpv = FALSE;
1140 if (pmap_pte_ci(pte))
1141 cacheable = FALSE;
1142 goto validate;
1143 }
1144
1145 /*
1146 * Mapping has changed, invalidate old range and fall through to
1147 * handle validating new mapping.
1148 */
1149 if (opa) {
1150#ifdef DEBUG
1151 if (pmapdebug & PDB_ENTER)
1152 printf("enter: removing old mapping %x\n", va);
1153#endif
1154 pmap_remove(pmap, va, va + PAGE_SIZE);
1155#ifdef DEBUG
1156 enter_stats.mchange++;
1157#endif
1158 }
1159
1160 /*
1161 * If this is a new user mapping, increment the wiring count
1162 * on this PT page. PT pages are wired down as long as there
1163 * is a valid mapping in the page.
1164 */
1165 if (pmap != kernel_pmap)
1166 vm_map_pageable(pt_map, trunc_page(pte),
1167 round_page(pte+1), FALSE);
1168
1169 /*
1170 * Enter on the PV list if part of our managed memory
1171 * Note that we raise IPL while manipulating pv_table
1172 * since pmap_enter can be called at interrupt time.
1173 */
1174 if (pa >= vm_first_phys && pa < vm_last_phys) {
1175 register pv_entry_t pv, npv;
1176 int s;
1177
1178#ifdef DEBUG
1179 enter_stats.managed++;
1180#endif
1181 pv = pa_to_pvh(pa);
1182 s = splimp();
1183#ifdef DEBUG
1184 if (pmapdebug & PDB_ENTER)
1185 printf("enter: pv at %x: %x/%x/%x\n",
1186 pv, pv->pv_va, pv->pv_pmap, pv->pv_next);
1187#endif
1188 /*
1189 * No entries yet, use header as the first entry
1190 */
2059b854 1191 if (pv->pv_pmap == NULL) {
1192#ifdef DEBUG
1193 enter_stats.firstpv++;
1194#endif
1195 pv->pv_va = va;
1196 pv->pv_pmap = pmap;
1197 pv->pv_next = NULL;
1198 pv->pv_ptste = NULL;
1199 pv->pv_ptpmap = NULL;
1200 pv->pv_flags = 0;
1201 }
1202 /*
1203 * There is at least one other VA mapping this page.
1204 * Place this entry after the header.
1205 */
1206 else {
1207#ifdef DEBUG
1208 for (npv = pv; npv; npv = npv->pv_next)
1209 if (pmap == npv->pv_pmap && va == npv->pv_va)
1210 panic("pmap_enter: already in pv_tab");
1211#endif
1212 npv = (pv_entry_t)
1213 malloc(sizeof *npv, M_VMPVENT, M_NOWAIT);
1214 npv->pv_va = va;
1215 npv->pv_pmap = pmap;
1216 npv->pv_next = pv->pv_next;
1217 npv->pv_ptste = NULL;
1218 npv->pv_ptpmap = NULL;
1219 pv->pv_next = npv;
1220#ifdef DEBUG
1221 if (!npv->pv_next)
1222 enter_stats.secondpv++;
1223#endif
1224 /*
1225 * Since there is another logical mapping for the
1226 * same page we may need to cache-inhibit the
1227 * descriptors on those CPUs with external VACs.
1228 * We don't need to CI if:
1229 *
 1230 * - No two mappings belong to the same user pmap.
1231 * Since the cache is flushed on context switches
1232 * there is no problem between user processes.
1233 *
1234 * - Mappings within a single pmap are a certain
1235 * magic distance apart. VAs at these appropriate
1236 * boundaries map to the same cache entries or
1237 * otherwise don't conflict.
1238 *
1239 * To keep it simple, we only check for these special
1240 * cases if there are only two mappings, otherwise we
1241 * punt and always CI.
1242 *
1243 * Note that there are no aliasing problems with the
1244 * on-chip data-cache when the WA bit is set.
1245 */
1246 if (pmap_aliasmask) {
1247 if (pv->pv_flags & PV_CI) {
1248#ifdef DEBUG
1249 if (pmapdebug & PDB_CACHE)
1250 printf("enter: pa %x already CI'ed\n",
1251 pa);
1252#endif
1253 checkpv = cacheable = FALSE;
1254 } else if (npv->pv_next ||
1255 ((pmap == pv->pv_pmap ||
1256 pmap == kernel_pmap ||
1257 pv->pv_pmap == kernel_pmap) &&
1258 ((pv->pv_va & pmap_aliasmask) !=
1259 (va & pmap_aliasmask)))) {
1260#ifdef DEBUG
1261 if (pmapdebug & PDB_CACHE)
1262 printf("enter: pa %x CI'ing all\n",
1263 pa);
1264#endif
1265 cacheable = FALSE;
1266 pv->pv_flags |= PV_CI;
1267#ifdef DEBUG
1268 enter_stats.ci++;
1269#endif
1270 }
1271 }
1272 }
1273 splx(s);
1274 }
1275 /*
1276 * Assumption: if it is not part of our managed memory
 1277 * then it must be device memory which may be volatile.
1278 */
1279 else if (pmap_initialized) {
1280 checkpv = cacheable = FALSE;
1281#ifdef DEBUG
1282 enter_stats.unmanaged++;
1283#endif
1284 }
1285
1286 /*
1287 * Increment counters
1288 */
1289 pmap->pm_stats.resident_count++;
1290 if (wired)
1291 pmap->pm_stats.wired_count++;
1292
1293validate:
1294 /*
1295 * Purge kernel side of VAC to ensure we get correct state
1296 * of HW bits so we don't clobber them.
1297 */
1298 if (pmap_aliasmask)
1299 DCIS();
1300 /*
1301 * Now validate mapping with desired protection/wiring.
1302 * Assume uniform modified and referenced status for all
1303 * HP pages in a MACH page.
1304 */
1305 npte = (pa & PG_FRAME) | pte_prot(pmap, prot) | PG_V;
1306 npte |= (*(int *)pte & (PG_M|PG_U));
1307 if (wired)
1308 npte |= PG_W;
1309 if (!checkpv && !cacheable)
1310 npte |= PG_CI;
1311#if defined(HP380)
1312 if (mmutype == MMU_68040 && (npte & (PG_PROT|PG_CI)) == PG_RW)
1313#ifdef DEBUG
1314 if (dowriteback && (dokwriteback || pmap != kernel_pmap))
1315#endif
1316 npte |= PG_CCB;
1317#endif
1318#ifdef DEBUG
1319 if (pmapdebug & PDB_ENTER)
1320 printf("enter: new pte value %x\n", npte);
1321#endif
1322#if defined(DYNPGSIZE)
1323 {
1324 register int ix = 0;
1325
1326 do {
1327#if defined(HP380)
1328 if (mmutype == MMU_68040) {
1329 DCFP(pa);
1330 ICPP(pa);
1331 }
1332#endif
1333 *(int *)pte++ = npte;
1334 TBIS(va);
1335 npte += HP_PAGE_SIZE;
1336 va += HP_PAGE_SIZE;
1337 } while (++ix != hppagesperpage);
1338 }
1339#else
1340#if defined(HP380)
1341 if (mmutype == MMU_68040) {
1342 DCFP(pa);
1343 ICPP(pa);
1344 }
1345#endif
1346 *(int *)pte = npte;
1347 TBIS(va);
1348#endif
1349 /*
1350 * The following is executed if we are entering a second
1351 * (or greater) mapping for a physical page and the mappings
1352 * may create an aliasing problem. In this case we must
1353 * cache inhibit the descriptors involved and flush any
1354 * external VAC.
1355 */
1356 if (checkpv && !cacheable) {
1357 pmap_changebit(pa, PG_CI, TRUE);
1358 DCIA();
1359#ifdef DEBUG
1360 enter_stats.flushes++;
1361#endif
1362#ifdef DEBUG
1363 if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) ==
1364 (PDB_CACHE|PDB_PVDUMP))
1365 pmap_pvdump(pa);
1366#endif
1367 }
1368#ifdef DEBUG
1369 else if (pmapvacflush & PVF_ENTER) {
1370 if (pmapvacflush & PVF_TOTAL)
1371 DCIA();
1372 else if (pmap == kernel_pmap)
1373 DCIS();
1374 else
1375 DCIU();
1376 }
1377 if ((pmapdebug & PDB_WIRING) && pmap != kernel_pmap) {
9acfa6cd 1378#if defined(DYNPGSIZE)
8f961915 1379 va -= PAGE_SIZE;
9acfa6cd 1380#endif
1381 pmap_check_wiring("enter", trunc_page(pmap_pte(pmap, va)));
1382 }
1383#endif
1384}
1385
1386/*
1387 * Routine: pmap_change_wiring
1388 * Function: Change the wiring attribute for a map/virtual-address
1389 * pair.
1390 * In/out conditions:
1391 * The mapping must already exist in the pmap.
1392 */
1393void
1394pmap_change_wiring(pmap, va, wired)
1395 register pmap_t pmap;
1396 vm_offset_t va;
1397 boolean_t wired;
1398{
1399 register pt_entry_t *pte;
1400
1401#ifdef DEBUG
1402 if (pmapdebug & PDB_FOLLOW)
1403 printf("pmap_change_wiring(%x, %x, %x)\n", pmap, va, wired);
1404#endif
2059b854 1405 if (pmap == NULL)
1406 return;
1407
1408 pte = pmap_pte(pmap, va);
1409#ifdef DEBUG
1410 /*
1411 * Page table page is not allocated.
1412 * Should this ever happen? Ignore it for now,
1413 * we don't want to force allocation of unnecessary PTE pages.
1414 */
9acfa6cd 1415 if (!pmap_ste_v(pmap, va)) {
1416 if (pmapdebug & PDB_PARANOIA)
1417 printf("pmap_change_wiring: invalid STE for %x\n", va);
1418 return;
1419 }
1420 /*
1421 * Page not valid. Should this ever happen?
1422 * Just continue and change wiring anyway.
1423 */
1424 if (!pmap_pte_v(pte)) {
1425 if (pmapdebug & PDB_PARANOIA)
1426 printf("pmap_change_wiring: invalid PTE for %x\n", va);
1427 }
1428#endif
1429 if (wired && !pmap_pte_w(pte) || !wired && pmap_pte_w(pte)) {
1430 if (wired)
1431 pmap->pm_stats.wired_count++;
1432 else
1433 pmap->pm_stats.wired_count--;
1434 }
1435 /*
1436 * Wiring is not a hardware characteristic so there is no need
1437 * to invalidate TLB.
1438 */
1439#if defined(DYNPGSIZE)
1440 {
1441 register int ix = 0;
1442
1443 do {
1444 pmap_pte_set_w(pte++, wired);
1445 } while (++ix != hppagesperpage);
1446 }
1447#else
1448 pmap_pte_set_w(pte, wired);
1449#endif
1450}
1451
1452/*
1453 * Routine: pmap_extract
1454 * Function:
1455 * Extract the physical page address associated
1456 * with the given map/virtual_address pair.
1457 */
1458
1459vm_offset_t
1460pmap_extract(pmap, va)
1461 register pmap_t pmap;
1462 vm_offset_t va;
1463{
1464 register vm_offset_t pa;
1465
1466#ifdef DEBUG
1467 if (pmapdebug & PDB_FOLLOW)
1468 printf("pmap_extract(%x, %x) -> ", pmap, va);
1469#endif
1470 pa = 0;
9acfa6cd 1471 if (pmap && pmap_ste_v(pmap, va))
1472 pa = *(int *)pmap_pte(pmap, va);
1473 if (pa)
1474 pa = (pa & PG_FRAME) | (va & ~PG_FRAME);
1475#ifdef DEBUG
1476 if (pmapdebug & PDB_FOLLOW)
1477 printf("%x\n", pa);
1478#endif
1479 return(pa);
1480}
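/*
 * Editor's note (illustrative): pmap_extract() composes the PTE's frame
 * number with the offset bits of the VA, so for a mapped address
 *
 *	pa = pmap_extract(kernel_pmap, va);
 *
 * yields (frame & PG_FRAME) | (va & ~PG_FRAME), and 0 when nothing is
 * mapped there; pmap_init() above uses it to learn Segtabzeropa and the
 * physical addresses of the KPT pages.
 */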
1481
1482/*
1483 * Copy the range specified by src_addr/len
1484 * from the source map to the range dst_addr/len
1485 * in the destination map.
1486 *
1487 * This routine is only advisory and need not do anything.
1488 */
1489void pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
1490 pmap_t dst_pmap;
1491 pmap_t src_pmap;
1492 vm_offset_t dst_addr;
1493 vm_size_t len;
1494 vm_offset_t src_addr;
1495{
1496#ifdef DEBUG
1497 if (pmapdebug & PDB_FOLLOW)
1498 printf("pmap_copy(%x, %x, %x, %x, %x)\n",
1499 dst_pmap, src_pmap, dst_addr, len, src_addr);
1500#endif
1501}
1502
1503/*
1504 * Require that all active physical maps contain no
1505 * incorrect entries NOW. [This update includes
1506 * forcing updates of any address map caching.]
1507 *
 1508 * Generally used to ensure that a thread about
1509 * to run will see a semantically correct world.
1510 */
1511void pmap_update()
1512{
1513#ifdef DEBUG
1514 if (pmapdebug & PDB_FOLLOW)
1515 printf("pmap_update()\n");
1516#endif
1517 TBIA();
1518}
1519
1520/*
1521 * Routine: pmap_collect
1522 * Function:
1523 * Garbage collects the physical map system for
1524 * pages which are no longer used.
1525 * Success need not be guaranteed -- that is, there
1526 * may well be pages which are not referenced, but
1527 * others may be collected.
1528 * Usage:
1529 * Called by the pageout daemon when pages are scarce.
1530 */
1531void
1532pmap_collect(pmap)
1533 pmap_t pmap;
1534{
1535 register vm_offset_t pa;
1536 register pv_entry_t pv;
1537 register int *pte;
1538 vm_offset_t kpa;
1539 int s;
1540
1541#ifdef DEBUG
1542 int *ste;
1543 int opmapdebug;
1544#endif
1545 if (pmap != kernel_pmap)
1546 return;
1547
1548#ifdef DEBUG
1549 if (pmapdebug & PDB_FOLLOW)
1550 printf("pmap_collect(%x)\n", pmap);
1551 kpt_stats.collectscans++;
1552#endif
1553 s = splimp();
1554 for (pa = vm_first_phys; pa < vm_last_phys; pa += PAGE_SIZE) {
1555 register struct kpt_page *kpt, **pkpt;
1556
1557 /*
1558 * Locate physical pages which are being used as kernel
1559 * page table pages.
1560 */
1561 pv = pa_to_pvh(pa);
1562 if (pv->pv_pmap != kernel_pmap || !(pv->pv_flags & PV_PTPAGE))
1563 continue;
1564 do {
1565 if (pv->pv_ptste && pv->pv_ptpmap == kernel_pmap)
1566 break;
1567 } while (pv = pv->pv_next);
2059b854 1568 if (pv == NULL)
1569 continue;
1570#ifdef DEBUG
1571 if (pv->pv_va < (vm_offset_t)Sysmap ||
1572 pv->pv_va >= (vm_offset_t)Sysmap + HP_MAX_PTSIZE)
1573 printf("collect: kernel PT VA out of range\n");
1574 else
1575 goto ok;
1576 pmap_pvdump(pa);
1577 continue;
1578ok:
1579#endif
1580 pte = (int *)(pv->pv_va + HP_PAGE_SIZE);
1581 while (--pte >= (int *)pv->pv_va && *pte == PG_NV)
1582 ;
1583 if (pte >= (int *)pv->pv_va)
1584 continue;
1585
1586#ifdef DEBUG
1587 if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT)) {
1588 printf("collect: freeing KPT page at %x (ste %x@%x)\n",
1589 pv->pv_va, *(int *)pv->pv_ptste, pv->pv_ptste);
1590 opmapdebug = pmapdebug;
1591 pmapdebug |= PDB_PTPAGE;
1592 }
1593
1594 ste = (int *)pv->pv_ptste;
1595#endif
1596 /*
1597 * If all entries were invalid we can remove the page.
1598 * We call pmap_remove to take care of invalidating ST
1599 * and Sysptmap entries.
1600 */
1601 kpa = pmap_extract(pmap, pv->pv_va);
1602 pmap_remove(pmap, pv->pv_va, pv->pv_va + HP_PAGE_SIZE);
1603 /*
1604 * Use the physical address to locate the original
1605 * (kmem_alloc assigned) address for the page and put
1606 * that page back on the free list.
1607 */
1608 for (pkpt = &kpt_used_list, kpt = *pkpt;
1609 kpt != (struct kpt_page *)0;
1610 pkpt = &kpt->kpt_next, kpt = *pkpt)
1611 if (kpt->kpt_pa == kpa)
1612 break;
1613#ifdef DEBUG
1614 if (kpt == (struct kpt_page *)0)
1615 panic("pmap_collect: lost a KPT page");
1616 if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
1617 printf("collect: %x (%x) to free list\n",
1618 kpt->kpt_va, kpa);
1619#endif
1620 *pkpt = kpt->kpt_next;
1621 kpt->kpt_next = kpt_free_list;
1622 kpt_free_list = kpt;
1623#ifdef DEBUG
1624 kpt_stats.kptinuse--;
1625 kpt_stats.collectpages++;
1626 if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
1627 pmapdebug = opmapdebug;
1628
1629 if (*ste)
1630 printf("collect: kernel STE at %x still valid (%x)\n",
1631 ste, *ste);
1632 ste = (int *)&Sysptmap[(st_entry_t *)ste-pmap_ste(kernel_pmap, 0)];
1633 if (*ste)
1634 printf("collect: kernel PTmap at %x still valid (%x)\n",
1635 ste, *ste);
1636#endif
1637 }
1638 splx(s);
1639}
1640
1641void
1642pmap_activate(pmap, pcbp)
1643 register pmap_t pmap;
1644 struct pcb *pcbp;
1645{
1646#ifdef DEBUG
1647 if (pmapdebug & (PDB_FOLLOW|PDB_SEGTAB))
1648 printf("pmap_activate(%x, %x)\n", pmap, pcbp);
1649#endif
2059b854 1650 PMAP_ACTIVATE(pmap, pcbp, pmap == curproc->p_vmspace->vm_map.pmap);
1651}
1652
1653/*
1654 * pmap_zero_page zeros the specified (machine independent)
1655 * page by mapping the page into virtual memory and using
1656 * bzero to clear its contents, one machine dependent page
1657 * at a time.
1658 *
1659 * XXX this is a bad implementation for virtual cache machines
1660 * (320/350) because pmap_enter doesn't cache-inhibit the temporary
1661 * kernel mapping and we wind up with data cached for that KVA.
1662 * It is probably a win for physical cache machines (370/380)
1663 * as the cache loading is not wasted.
8f961915 1664 */
21b1e496 1665void
8f961915 1666pmap_zero_page(phys)
9acfa6cd 1667 vm_offset_t phys;
8f961915 1668{
1669 register vm_offset_t kva;
1670 extern caddr_t CADDR1;
1671
1672#ifdef DEBUG
1673 if (pmapdebug & PDB_FOLLOW)
1674 printf("pmap_zero_page(%x)\n", phys);
1675#endif
1676 kva = (vm_offset_t) CADDR1;
1677#if defined(DYNPGSIZE)
1678 {
1679 register int ix = 0;
1680
1681 do {
1682 pmap_enter(kernel_pmap, kva, phys,
1683 VM_PROT_READ|VM_PROT_WRITE, TRUE);
1684 bzero((caddr_t)kva, HP_PAGE_SIZE);
1685 pmap_remove(kernel_pmap, kva, kva+HP_PAGE_SIZE);
1686 phys += HP_PAGE_SIZE;
1687 } while (++ix != hppagesperpage);
1688 }
1689#else
1690 pmap_enter(kernel_pmap, kva, phys, VM_PROT_READ|VM_PROT_WRITE, TRUE);
1691 bzero((caddr_t)kva, HP_PAGE_SIZE);
1692 pmap_remove(kernel_pmap, kva, kva+PAGE_SIZE);
1693#endif
1694}
1695
1696/*
1697 * pmap_copy_page copies the specified (machine independent)
1698 * page by mapping the page into virtual memory and using
1699 * bcopy to copy the page, one machine dependent page at a
1700 * time.
1701 *
1702 *
1703 * XXX this is a bad implementation for virtual cache machines
1704 * (320/350) because pmap_enter doesn't cache-inhibit the temporary
1705 * kernel mapping and we wind up with data cached for that KVA.
1706 * It is probably a win for physical cache machines (370/380)
1707 * as the cache loading is not wasted.
8f961915 1708 */
21b1e496 1709void
8f961915 1710pmap_copy_page(src, dst)
9acfa6cd 1711 vm_offset_t src, dst;
8f961915 1712{
1713 register vm_offset_t skva, dkva;
1714 extern caddr_t CADDR1, CADDR2;
1715
1716#ifdef DEBUG
1717 if (pmapdebug & PDB_FOLLOW)
1718 printf("pmap_copy_page(%x, %x)\n", src, dst);
1719#endif
1720 skva = (vm_offset_t) CADDR1;
1721 dkva = (vm_offset_t) CADDR2;
1722#if defined(DYNPGSIZE)
1723 {
1724 register int ix = 0;
8f961915 1725
1726 do {
1727 pmap_enter(kernel_pmap, skva, src, VM_PROT_READ, TRUE);
1728 pmap_enter(kernel_pmap, dkva, dst,
1729 VM_PROT_READ|VM_PROT_WRITE, TRUE);
1730 bcopy((caddr_t)skva, (caddr_t)dkva, PAGE_SIZE);
1731 /* CADDR1 and CADDR2 are virtually contiguous */
1732 pmap_remove(kernel_pmap, skva, skva+2*HP_PAGE_SIZE);
1733 src += HP_PAGE_SIZE;
1734 dst += HP_PAGE_SIZE;
1735 } while (++ix != hppagesperpage);
1736 }
1737#else
1738 pmap_enter(kernel_pmap, skva, src, VM_PROT_READ, TRUE);
1739 pmap_enter(kernel_pmap, dkva, dst, VM_PROT_READ|VM_PROT_WRITE, TRUE);
1740 bcopy((caddr_t)skva, (caddr_t)dkva, PAGE_SIZE);
1741 /* CADDR1 and CADDR2 are virtually contiguous */
1742 pmap_remove(kernel_pmap, skva, skva+2*PAGE_SIZE);
1743#endif
1744}
1745
1746/*
1747 * Routine: pmap_pageable
1748 * Function:
1749 * Make the specified pages (by pmap, offset)
1750 * pageable (or not) as requested.
1751 *
1752 * A page which is not pageable may not take
1753 * a fault; therefore, its page table entry
1754 * must remain valid for the duration.
1755 *
1756 * This routine is merely advisory; pmap_enter
1757 * will specify that these pages are to be wired
1758 * down (or not) as appropriate.
1759 */
21b1e496 1760void
1761pmap_pageable(pmap, sva, eva, pageable)
1762 pmap_t pmap;
1763 vm_offset_t sva, eva;
1764 boolean_t pageable;
1765{
1766#ifdef DEBUG
1767 if (pmapdebug & PDB_FOLLOW)
1768 printf("pmap_pageable(%x, %x, %x, %x)\n",
1769 pmap, sva, eva, pageable);
1770#endif
1771 /*
1772 * If we are making a PT page pageable then all valid
1773 * mappings must be gone from that page. Hence it should
1774 * be all zeros and there is no need to clean it.
1775 * Assumptions:
1776 * - we are called with only one page at a time
1777 * - PT pages have only one pv_table entry
1778 */
1779 if (pmap == kernel_pmap && pageable && sva + PAGE_SIZE == eva) {
1780 register pv_entry_t pv;
1781 register vm_offset_t pa;
1782
1783#ifdef DEBUG
1784 if ((pmapdebug & (PDB_FOLLOW|PDB_PTPAGE)) == PDB_PTPAGE)
1785 printf("pmap_pageable(%x, %x, %x, %x)\n",
1786 pmap, sva, eva, pageable);
1787#endif
9acfa6cd 1788 if (!pmap_ste_v(pmap, sva))
1789 return;
1790 pa = pmap_pte_pa(pmap_pte(pmap, sva));
1791 if (pa < vm_first_phys || pa >= vm_last_phys)
1792 return;
1793 pv = pa_to_pvh(pa);
2059b854 1794 if (pv->pv_ptste == NULL)
1795 return;
1796#ifdef DEBUG
1797 if (pv->pv_va != sva || pv->pv_next) {
1798 printf("pmap_pageable: bad PT page va %x next %x\n",
1799 pv->pv_va, pv->pv_next);
1800 return;
1801 }
1802#endif
1803 /*
1804 * Mark it unmodified to avoid pageout
1805 */
2059b854 1806 pmap_changebit(pa, PG_M, FALSE);
1807#ifdef DEBUG
1808 if (pmapdebug & PDB_PTPAGE)
1809 printf("pmap_pageable: PT page %x(%x) unmodified\n",
1810 sva, *(int *)pmap_pte(pmap, sva));
1811 if (pmapdebug & PDB_WIRING)
1812 pmap_check_wiring("pageable", sva);
1813#endif
1814 }
1815}
1816
1817/*
1818 * Clear the modify bits on the specified physical page.
1819 */
1820
1821void
1822pmap_clear_modify(pa)
1823 vm_offset_t pa;
1824{
1825#ifdef DEBUG
1826 if (pmapdebug & PDB_FOLLOW)
1827 printf("pmap_clear_modify(%x)\n", pa);
1828#endif
1829 pmap_changebit(pa, PG_M, FALSE);
1830}
1831
1832/*
1833 * pmap_clear_reference:
1834 *
1835 * Clear the reference bit on the specified physical page.
1836 */
1837
1838void pmap_clear_reference(pa)
1839 vm_offset_t pa;
1840{
1841#ifdef DEBUG
1842 if (pmapdebug & PDB_FOLLOW)
1843 printf("pmap_clear_reference(%x)\n", pa);
1844#endif
1845 pmap_changebit(pa, PG_U, FALSE);
1846}
1847
1848/*
1849 * pmap_is_referenced:
1850 *
1851 * Return whether or not the specified physical page is referenced
1852 * by any physical maps.
1853 */
1854
1855boolean_t
1856pmap_is_referenced(pa)
1857 vm_offset_t pa;
1858{
1859#ifdef DEBUG
1860 if (pmapdebug & PDB_FOLLOW) {
1861 boolean_t rv = pmap_testbit(pa, PG_U);
1862 printf("pmap_is_referenced(%x) -> %c\n", pa, "FT"[rv]);
1863 return(rv);
1864 }
1865#endif
1866 return(pmap_testbit(pa, PG_U));
1867}
1868
1869/*
1870 * pmap_is_modified:
1871 *
1872 * Return whether or not the specified physical page is modified
1873 * by any physical maps.
1874 */
1875
1876boolean_t
1877pmap_is_modified(pa)
1878 vm_offset_t pa;
1879{
1880#ifdef DEBUG
1881 if (pmapdebug & PDB_FOLLOW) {
1882 boolean_t rv = pmap_testbit(pa, PG_M);
1883 printf("pmap_is_modified(%x) -> %c\n", pa, "FT"[rv]);
1884 return(rv);
1885 }
1886#endif
1887 return(pmap_testbit(pa, PG_M));
1888}
1889
1890vm_offset_t
1891pmap_phys_address(ppn)
1892 int ppn;
1893{
1894 return(hp300_ptob(ppn));
1895}
1896
1897/*
1898 * Miscellaneous support routines follow
1899 */
1900
1901/* static */
1902boolean_t
1903pmap_testbit(pa, bit)
1904 register vm_offset_t pa;
1905 int bit;
1906{
1907 register pv_entry_t pv;
9acfa6cd 1908 register int *pte;
1909 int s;
1910
1911 if (pa < vm_first_phys || pa >= vm_last_phys)
1912 return(FALSE);
1913
1914 pv = pa_to_pvh(pa);
1915 s = splimp();
1916 /*
1917 * Check saved info first
1918 */
1919 if (pmap_attributes[pa_index(pa)] & bit) {
1920 splx(s);
1921 return(TRUE);
1922 }
1923 /*
1924 * Flush VAC to get correct state of any hardware maintained bits.
1925 */
1926 if (pmap_aliasmask && (bit & (PG_U|PG_M)))
1927 DCIS();
1928 /*
1929 * Not found, check current mappings returning
1930 * immediately if found.
1931 */
2059b854 1932 if (pv->pv_pmap != NULL) {
1933 for (; pv; pv = pv->pv_next) {
1934 pte = (int *) pmap_pte(pv->pv_pmap, pv->pv_va);
1935#if defined(DYNPGSIZE)
1936 {
1937 register int ix = 0;
1938
1939 do {
1940 if (*pte++ & bit) {
1941 splx(s);
1942 return(TRUE);
1943 }
1944 } while (++ix != hppagesperpage);
1945 }
1946#else
1947 if (*pte & bit) {
1948 splx(s);
1949 return(TRUE);
1950 }
1951#endif
1952 }
1953 }
1954 splx(s);
1955 return(FALSE);
1956}
1957
1958/* static */
1959pmap_changebit(pa, bit, setem)
1960 register vm_offset_t pa;
1961 int bit;
1962 boolean_t setem;
1963{
1964 register pv_entry_t pv;
9acfa6cd 1965 register int *pte, npte;
1966 vm_offset_t va;
1967 int s;
1968 boolean_t firstpage = TRUE;
1969
1970#ifdef DEBUG
1971 if (pmapdebug & PDB_BITS)
1972 printf("pmap_changebit(%x, %x, %s)\n",
1973 pa, bit, setem ? "set" : "clear");
1974#endif
1975 if (pa < vm_first_phys || pa >= vm_last_phys)
1976 return;
1977
1978 pv = pa_to_pvh(pa);
1979 s = splimp();
1980 /*
1981 * Clear saved attributes (modify, reference)
1982 */
1983 if (!setem)
1984 pmap_attributes[pa_index(pa)] &= ~bit;
1985#if defined(HP380)
1986 /*
1987 * If we are changing caching status or protection
1988 * make sure the caches are flushed.
1989 */
1990 if (mmutype == MMU_68040 &&
1991 (bit == PG_RO && setem || (bit & PG_CMASK))) {
1992 DCFP(pa);
1993 ICPP(pa);
1994 }
1995#endif
1996 /*
 1997 * Loop over all current mappings setting/clearing as appropriate
1998 * If setting RO do we need to clear the VAC?
1999 */
2059b854 2000 if (pv->pv_pmap != NULL) {
8f961915
KM
2001#ifdef DEBUG
2002 int toflush = 0;
2003#endif
2004 for (; pv; pv = pv->pv_next) {
2005#ifdef DEBUG
2006 toflush |= (pv->pv_pmap == kernel_pmap) ? 2 : 1;
2007#endif
2008 va = pv->pv_va;
2009
2010 /*
2011 * XXX don't write protect pager mappings
2012 */
2013 if (bit == PG_RO) {
2014 extern vm_offset_t pager_sva, pager_eva;
2015
2016 if (va >= pager_sva && va < pager_eva)
2017 continue;
2018 }
2019
2020 pte = (int *) pmap_pte(pv->pv_pmap, va);
2021 /*
2022 * Flush VAC to ensure we get correct state of HW bits
2023 * so we don't clobber them.
2024 */
2025 if (firstpage && pmap_aliasmask) {
2026 firstpage = FALSE;
2027 DCIS();
2028 }
9acfa6cd
MH
2029#if defined(DYNPGSIZE)
2030 {
2031 register int ix = 0;
2032
2033 do {
2034 if (setem)
2035 npte = *pte | bit;
2036 else
2037 npte = *pte & ~bit;
2038 if (*pte != npte) {
2039 *pte = npte;
2040 TBIS(va);
2041 }
2042 va += HP_PAGE_SIZE;
2043 pte++;
2044 } while (++ix != hppagesperpage);
2045 }
2046#else
2047 if (setem)
2048 npte = *pte | bit;
2049 else
2050 npte = *pte & ~bit;
2051 if (*pte != npte) {
2052 *pte = npte;
2053 TBIS(va);
2054 }
2055#endif
8f961915
KM
2056 }
2057#ifdef DEBUG
2058 if (setem && bit == PG_RO && (pmapvacflush & PVF_PROTECT)) {
2059 if ((pmapvacflush & PVF_TOTAL) || toflush == 3)
2060 DCIA();
2061 else if (toflush == 2)
2062 DCIS();
2063 else
2064 DCIU();
2065 }
2066#endif
2067 }
2068 splx(s);
2069}
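
/*
 * Sketch only: the exported attribute operations elsewhere in this file
 * reduce to pmap_changebit() calls of the following shape.  These two
 * wrappers are illustrative, not quotations of that code.
 */
#ifdef notdef
void
example_clear_modify(pa)
	vm_offset_t pa;
{
	pmap_changebit(pa, PG_M, FALSE);	/* clear the modify bit */
}

void
example_write_protect(pa)
	vm_offset_t pa;
{
	pmap_changebit(pa, PG_RO, TRUE);	/* make all mappings read-only */
}
#endif /* notdef */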
2070
2071/* static */
2072void
2073pmap_enter_ptpage(pmap, va)
2074 register pmap_t pmap;
2075 register vm_offset_t va;
2076{
2077 register vm_offset_t ptpa;
2078 register pv_entry_t pv;
2079 st_entry_t *ste;
2080 int s;
2081
2082#ifdef DEBUG
2083 if (pmapdebug & (PDB_FOLLOW|PDB_ENTER|PDB_PTPAGE))
2084 printf("pmap_enter_ptpage: pmap %x, va %x\n", pmap, va);
2085 enter_stats.ptpneeded++;
2086#endif
2087 /*
2088 * Allocate a segment table if necessary. Note that it is allocated
2089 * from kernel_map and not pt_map. This keeps user page tables
2090 * aligned on segment boundaries in the kernel address space.
2091 * The segment table is wired down. It will be freed whenever the
2092 * reference count drops to zero.
2093 */
2094 if (pmap->pm_stab == Segtabzero) {
2095 pmap->pm_stab = (st_entry_t *)
2096 kmem_alloc(kernel_map, HP_STSIZE);
9acfa6cd
MH
2097 pmap->pm_stpa = (st_entry_t *)
2098 pmap_extract(kernel_pmap, (vm_offset_t)pmap->pm_stab);
2099#if defined(HP380)
2100 if (mmutype == MMU_68040) {
2101#ifdef DEBUG
2102 if (dowriteback && dokwriteback)
2103#endif
2104 pmap_changebit((vm_offset_t)pmap->pm_stab, PG_CCB, 0);
2105 pmap->pm_stfree = protostfree;
2106 }
2107#endif
8f961915
KM
2108 pmap->pm_stchanged = TRUE;
2109 /*
 2110 * XXX may have changed the segment table pointer for the current
 2111 * process, so update now to reload the hardware.
2112 */
2059b854
MK
2113 if (pmap == curproc->p_vmspace->vm_map.pmap)
2114 PMAP_ACTIVATE(pmap, (struct pcb *)curproc->p_addr, 1);
8f961915
KM
2115#ifdef DEBUG
2116 if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
9acfa6cd
MH
2117 printf("enter: pmap %x stab %x(%x)\n",
2118 pmap, pmap->pm_stab, pmap->pm_stpa);
8f961915
KM
2119#endif
2120 }
2121
2122 ste = pmap_ste(pmap, va);
9acfa6cd
MH
2123#if defined(HP380)
2124 /*
2125 * Allocate level 2 descriptor block if necessary
2126 */
2127 if (mmutype == MMU_68040) {
2128 if (!ste->sg_v) {
2129 int ix;
2130 caddr_t addr;
2131
2132 ix = bmtol2(pmap->pm_stfree);
2133 if (ix == -1)
2134 panic("enter: out of address space"); /* XXX */
2135 pmap->pm_stfree &= ~l2tobm(ix);
2136 addr = (caddr_t)&pmap->pm_stab[ix*SG4_LEV2SIZE];
2137 bzero(addr, SG4_LEV2SIZE*sizeof(st_entry_t));
2138 addr = (caddr_t)&pmap->pm_stpa[ix*SG4_LEV2SIZE];
2139 *(int *)ste = (u_int)addr | SG_RW | SG_U | SG_V;
2140#ifdef DEBUG
2141 if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
2142 printf("enter: alloc ste2 %d(%x)\n", ix, addr);
2143#endif
2144 }
2145 ste = pmap_ste2(pmap, va);
2146 /*
2147 * Since a level 2 descriptor maps a block of SG4_LEV3SIZE
2148 * level 3 descriptors, we need a chunk of NPTEPG/SG4_LEV3SIZE
2149 * (16) such descriptors (NBPG/SG4_LEV3SIZE bytes) to map a
2150 * PT page--the unit of allocation. We set `ste' to point
2151 * to the first entry of that chunk which is validated in its
2152 * entirety below.
2153 */
2154 ste = (st_entry_t *)((int)ste & ~(NBPG/SG4_LEV3SIZE-1));
2155#ifdef DEBUG
2156 if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
2157 printf("enter: ste2 %x (%x)\n",
2158 pmap_ste2(pmap, va), ste);
2159#endif
2160 }
2161#endif
8f961915
KM
2162 va = trunc_page((vm_offset_t)pmap_pte(pmap, va));
2163
2164 /*
2165 * In the kernel we allocate a page from the kernel PT page
2166 * free list and map it into the kernel page table map (via
2167 * pmap_enter).
2168 */
2169 if (pmap == kernel_pmap) {
2170 register struct kpt_page *kpt;
2171
2172 s = splimp();
2173 if ((kpt = kpt_free_list) == (struct kpt_page *)0) {
2174 /*
2175 * No PT pages available.
2176 * Try once to free up unused ones.
2177 */
2178#ifdef DEBUG
2179 if (pmapdebug & PDB_COLLECT)
2180 printf("enter: no KPT pages, collecting...\n");
2181#endif
2182 pmap_collect(kernel_pmap);
2183 if ((kpt = kpt_free_list) == (struct kpt_page *)0)
2184 panic("pmap_enter_ptpage: can't get KPT page");
2185 }
2186#ifdef DEBUG
2187 if (++kpt_stats.kptinuse > kpt_stats.kptmaxuse)
2188 kpt_stats.kptmaxuse = kpt_stats.kptinuse;
2189#endif
2190 kpt_free_list = kpt->kpt_next;
2191 kpt->kpt_next = kpt_used_list;
2192 kpt_used_list = kpt;
2193 ptpa = kpt->kpt_pa;
cb6a4b59 2194 bzero((caddr_t)kpt->kpt_va, HP_PAGE_SIZE);
8f961915
KM
2195 pmap_enter(pmap, va, ptpa, VM_PROT_DEFAULT, TRUE);
2196#ifdef DEBUG
9acfa6cd
MH
2197 if (pmapdebug & (PDB_ENTER|PDB_PTPAGE)) {
2198 int ix = pmap_ste(pmap, va) - pmap_ste(pmap, 0);
2199
8f961915 2200 printf("enter: add &Sysptmap[%d]: %x (KPT page %x)\n",
9acfa6cd
MH
2201 ix, *(int *)&Sysptmap[ix], kpt->kpt_va);
2202 }
8f961915
KM
2203#endif
2204 splx(s);
2205 }
2206 /*
 2207 * For user processes we just simulate a fault on that location,
2208 * letting the VM system allocate a zero-filled page.
2209 */
2210 else {
2211#ifdef DEBUG
2212 if (pmapdebug & (PDB_ENTER|PDB_PTPAGE))
2213 printf("enter: about to fault UPT pg at %x\n", va);
9acfa6cd
MH
2214 s = vm_fault(pt_map, va, VM_PROT_READ|VM_PROT_WRITE, FALSE);
2215 if (s != KERN_SUCCESS) {
2216 printf("vm_fault(pt_map, %x, RW, 0) -> %d\n", va, s);
2217 panic("pmap_enter: vm_fault failed");
2218 }
2219#else
8f961915
KM
2220 if (vm_fault(pt_map, va, VM_PROT_READ|VM_PROT_WRITE, FALSE)
2221 != KERN_SUCCESS)
2222 panic("pmap_enter: vm_fault failed");
9acfa6cd 2223#endif
8f961915
KM
2224 ptpa = pmap_extract(kernel_pmap, va);
2225#ifdef DEBUG
2cbf9af3 2226 PHYS_TO_VM_PAGE(ptpa)->flags |= PG_PTPAGE;
8f961915
KM
2227#endif
2228 }
9acfa6cd
MH
2229#if defined(HP380)
2230 /*
2231 * Turn off copyback caching of page table pages,
2232 * could get ugly otherwise.
2233 */
2234#ifdef DEBUG
2235 if (dowriteback && dokwriteback)
2236#endif
2237 if (mmutype == MMU_68040) {
2238 int *pte = (int *)pmap_pte(kernel_pmap, va);
2239#ifdef DEBUG
2240 if ((pmapdebug & PDB_PARANOIA) && (*pte & PG_CCB) == 0)
2241 printf("%s PT no CCB: kva=%x ptpa=%x pte@%x=%x\n",
2242 pmap == kernel_pmap ? "Kernel" : "User",
2243 va, ptpa, pte, *pte);
2244#endif
2245 pmap_changebit(ptpa, PG_CCB, 0);
2246 }
2247#endif
8f961915
KM
2248 /*
2249 * Locate the PV entry in the kernel for this PT page and
2250 * record the STE address. This is so that we can invalidate
2251 * the STE when we remove the mapping for the page.
2252 */
2253 pv = pa_to_pvh(ptpa);
2254 s = splimp();
2255 if (pv) {
2256 pv->pv_flags |= PV_PTPAGE;
2257 do {
2258 if (pv->pv_pmap == kernel_pmap && pv->pv_va == va)
2259 break;
2260 } while (pv = pv->pv_next);
2261 }
2262#ifdef DEBUG
2059b854 2263 if (pv == NULL)
8f961915
KM
2264 panic("pmap_enter_ptpage: PT page not entered");
2265#endif
2266 pv->pv_ptste = ste;
2267 pv->pv_ptpmap = pmap;
2268#ifdef DEBUG
2269 if (pmapdebug & (PDB_ENTER|PDB_PTPAGE))
2270 printf("enter: new PT page at PA %x, ste at %x\n", ptpa, ste);
2271#endif
2272
2273 /*
2274 * Map the new PT page into the segment table.
2275 * Also increment the reference count on the segment table if this
2276 * was a user page table page. Note that we don't use vm_map_pageable
 2277 * to keep the count like we do for PT pages; this is mostly because
2278 * it would be difficult to identify ST pages in pmap_pageable to
2279 * release them. We also avoid the overhead of vm_map_pageable.
2280 */
9acfa6cd
MH
2281#if defined(HP380)
2282 if (mmutype == MMU_68040) {
2283 st_entry_t *este;
2284
2285 for (este = &ste[NPTEPG/SG4_LEV3SIZE]; ste < este; ste++) {
2286 *(int *)ste = ptpa | SG_U | SG_RW | SG_V;
2287 ptpa += SG4_LEV3SIZE * sizeof(st_entry_t);
2288 }
2289 } else
2290#endif
8f961915
KM
2291 *(int *)ste = (ptpa & SG_FRAME) | SG_RW | SG_V;
2292 if (pmap != kernel_pmap) {
2293 pmap->pm_sref++;
2294#ifdef DEBUG
2295 if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
2296 printf("enter: stab %x refcnt %d\n",
2297 pmap->pm_stab, pmap->pm_sref);
2298#endif
2299 }
2300 /*
2301 * Flush stale TLB info.
2302 */
2303 if (pmap == kernel_pmap)
2304 TBIAS();
2305 else
2306 TBIAU();
2307 pmap->pm_ptpages++;
2308 splx(s);
2309}
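
/*
 * Sketch only: worked numbers behind the 68040 STE loop above, assuming
 * NBPG == 4096 and 4-byte st_entry_t's as on the hp300.  The helper and
 * its consistency check are purely illustrative.
 */
#ifdef notdef
void
example_ste_arithmetic()
{
	int stes_per_ptpage = NPTEPG / SG4_LEV3SIZE;	      /* 1024/64 = 16 */
	int ste_bytes = stes_per_ptpage * sizeof(st_entry_t); /* 64 = NBPG/SG4_LEV3SIZE */
	int ptpa_step = SG4_LEV3SIZE * sizeof(st_entry_t);    /* 256-byte ptpa increment */

	/* 16 STEs, each covering 256 bytes of PTEs, span one 4096-byte PT page */
	if (stes_per_ptpage * ptpa_step != NBPG ||
	    ste_bytes != NBPG / SG4_LEV3SIZE)
		printf("example_ste_arithmetic: constants do not match\n");
}
#endif /* notdef */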
2310
2311#ifdef DEBUG
2312pmap_pvdump(pa)
2313 vm_offset_t pa;
2314{
2315 register pv_entry_t pv;
2316
2317 printf("pa %x", pa);
2318 for (pv = pa_to_pvh(pa); pv; pv = pv->pv_next)
2319 printf(" -> pmap %x, va %x, ptste %x, ptpmap %x, flags %x",
2320 pv->pv_pmap, pv->pv_va, pv->pv_ptste, pv->pv_ptpmap,
2321 pv->pv_flags);
2322 printf("\n");
2323}
2324
2325pmap_check_wiring(str, va)
2326 char *str;
2327 vm_offset_t va;
2328{
2329 vm_map_entry_t entry;
2330 register int count, *pte;
2331
2332 va = trunc_page(va);
9acfa6cd 2333 if (!pmap_ste_v(kernel_pmap, va) ||
8f961915
KM
2334 !pmap_pte_v(pmap_pte(kernel_pmap, va)))
2335 return;
2336
2337 if (!vm_map_lookup_entry(pt_map, va, &entry)) {
2338 printf("wired_check: entry for %x not found\n", va);
2339 return;
2340 }
2341 count = 0;
2342 for (pte = (int *)va; pte < (int *)(va+PAGE_SIZE); pte++)
2343 if (*pte)
2344 count++;
2345 if (entry->wired_count != count)
2346 printf("*%s*: %x: w%d/a%d\n",
2347 str, va, entry->wired_count, count);
2348}
2349#endif
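
/*
 * Sketch only: the DEBUG printfs throughout this file are gated by bits
 * in pmapdebug (PDB_ENTER, PDB_PTPAGE, PDB_SEGTAB, ...).  A debugger or
 * test hook could enable PT-page tracing along these lines; the helper
 * itself is hypothetical.
 */
#if defined(DEBUG) && defined(notdef)
void
example_trace_ptpages()
{
	pmapdebug |= PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB;
}
#endif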