merge latest Utah hp300 code including 68040 support
[unix-history] usr/src/sys/hp300/hp300/pmap.c
8f961915 1/*
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * the Systems Programming Group of the University of Utah Computer
7 * Science Department.
8f961915 8 *
4a4de5a4 9 * %sccs.include.redist.c%
8f961915 10 *
9acfa6cd 11 * @(#)pmap.c 7.9 (Berkeley) %G%
12 */
13
14/*
15 * HP9000/300 series physical map management code.
 16 * For 68020/68030 machines with HP, 68851, or 68030 MMUs
17 * (models 320,350,318,319,330,340,360,370,345,375)
18 * Don't even pay lip service to multiprocessor support.
4bc66f7c 19 *
20 * XXX will only work for PAGE_SIZE == NBPG (i.e. 4096 bytes).
21 * Hence, there is no point in defining DYNPGSIZE.
22 */
23
24/*
25 * Manages physical address maps.
26 *
27 * In addition to hardware address maps, this
28 * module is called upon to provide software-use-only
29 * maps which may or may not be stored in the same
30 * form as hardware maps. These pseudo-maps are
31 * used to store intermediate results from copy
32 * operations to and from address spaces.
33 *
34 * Since the information managed by this module is
35 * also stored by the logical address mapping module,
36 * this module may throw away valid virtual-to-physical
37 * mappings at almost any time. However, invalidations
38 * of virtual-to-physical mappings must be done as
39 * requested.
40 *
41 * In order to cope with hardware architectures which
42 * make virtual-to-physical map invalidates expensive,
43 * this module may delay invalidate or reduced protection
44 * operations until such time as they are actually
45 * necessary. This module is given full information as
46 * to which processors are currently using which maps,
47 * and to when physical maps must be made correct.
48 */
49
50#include "param.h"
8f961915 51#include "proc.h"
8f961915 52#include "malloc.h"
2059b854 53#include "user.h"
8f961915 54
2059b854 55#include "pte.h"
8f961915 56
57#include "vm/vm.h"
58#include "vm/vm_kern.h"
59#include "vm/vm_page.h"
60
61#include "../include/cpu.h"
8f961915 62
63#ifdef DEBUG
64struct {
65 int collectscans;
66 int collectpages;
67 int kpttotal;
68 int kptinuse;
69 int kptmaxuse;
70} kpt_stats;
71struct {
72 int kernel; /* entering kernel mapping */
73 int user; /* entering user mapping */
74 int ptpneeded; /* needed to allocate a PT page */
75 int pwchange; /* no mapping change, just wiring or protection */
76 int wchange; /* no mapping change, just wiring */
77 int mchange; /* was mapped but mapping to different page */
78 int managed; /* a managed page */
79 int firstpv; /* first mapping for this PA */
80 int secondpv; /* second mapping for this PA */
81 int ci; /* cache inhibited */
82 int unmanaged; /* not a managed page */
83 int flushes; /* cache flushes */
84} enter_stats;
85struct {
86 int calls;
87 int removes;
88 int pvfirst;
89 int pvsearch;
90 int ptinvalid;
91 int uflushes;
92 int sflushes;
93} remove_stats;
94struct {
95 int calls;
96 int pages;
97 int alreadyro;
98 int alreadyrw;
99} protect_stats;
100
101int debugmap = 0;
102int pmapdebug = 0x2000;
103#define PDB_FOLLOW 0x0001
104#define PDB_INIT 0x0002
105#define PDB_ENTER 0x0004
106#define PDB_REMOVE 0x0008
107#define PDB_CREATE 0x0010
108#define PDB_PTPAGE 0x0020
109#define PDB_CACHE 0x0040
110#define PDB_BITS 0x0080
111#define PDB_COLLECT 0x0100
112#define PDB_PROTECT 0x0200
113#define PDB_SEGTAB 0x0400
114#define PDB_PARANOIA 0x2000
115#define PDB_WIRING 0x4000
116#define PDB_PVDUMP 0x8000
117
118int pmapvacflush = 0;
119#define PVF_ENTER 0x01
120#define PVF_REMOVE 0x02
121#define PVF_PROTECT 0x04
122#define PVF_TOTAL 0x80
4bc66f7c 123
124#if defined(HP380)
125int dowriteback = 1; /* 68040: enable writeback caching */
126int dokwriteback = 1; /* 68040: enable writeback caching of kernel AS */
127#endif
128
4bc66f7c 129extern vm_offset_t pager_sva, pager_eva;
130#endif
131
132/*
133 * Get STEs and PTEs for user/kernel address space
134 */
135#if defined(HP380)
136#define pmap_ste(m, v) \
137 (&((m)->pm_stab[(vm_offset_t)(v) \
138 >> (mmutype == MMU_68040 ? SG4_SHIFT1 : SG_ISHIFT)]))
139#define pmap_ste1(m, v) \
140 (&((m)->pm_stab[(vm_offset_t)(v) >> SG4_SHIFT1]))
141/* XXX assumes physically contiguous ST pages (if more than one) */
142#define pmap_ste2(m, v) \
143 (&((m)->pm_stab[(st_entry_t *)(*(u_int *)pmap_ste1(m, v) & SG4_ADDR1) \
144 - (m)->pm_stpa + (((v) & SG4_MASK2) >> SG4_SHIFT2)]))
145#else
8f961915 146#define pmap_ste(m, v) (&((m)->pm_stab[(vm_offset_t)(v) >> SG_ISHIFT]))
9acfa6cd 147#endif
148#define pmap_pte(m, v) (&((m)->pm_ptab[(vm_offset_t)(v) >> PG_SHIFT]))
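/*
 * Note on the macros above: pmap_ste(m, v) indexes the segment table
 * pm_stab by the high-order bits of the VA, while pmap_pte(m, v) indexes
 * the (virtually contiguous) page table pm_ptab directly by page number.
 * On the 68040 (HP380) the segment table is two levels deep, hence the
 * separate pmap_ste1/pmap_ste2 forms.
 */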
149
150#if defined(HP380)
151#define pmap_ste_v(m, v) \
152 (mmutype == MMU_68040 \
153 ? (pmap_ste1(m, v)->sg_v && pmap_ste2(m, v)->sg_v) \
154 : (pmap_ste(m, v)->sg_v))
155#else
156#define pmap_ste_v(m, v) (pmap_ste(m, v)->sg_v)
157#endif
8f961915 158
9acfa6cd 159#define pmap_pte_pa(pte) (*(int *)(pte) & PG_FRAME)
160#define pmap_pte_w(pte) ((pte)->pg_w)
161#define pmap_pte_ci(pte) ((pte)->pg_ci)
162#define pmap_pte_m(pte) ((pte)->pg_m)
163#define pmap_pte_u(pte) ((pte)->pg_u)
9acfa6cd 164#define pmap_pte_prot(pte) ((pte)->pg_prot)
165#define pmap_pte_v(pte) ((pte)->pg_v)
166#define pmap_pte_set_w(pte, v) ((pte)->pg_w = (v))
167#define pmap_pte_set_prot(pte, v) ((pte)->pg_prot = (v))
168
169/*
170 * Given a map and a machine independent protection code,
 171 * convert to an hp300 protection code.
172 */
173#define pte_prot(m, p) (protection_codes[p])
174int protection_codes[8];
175
176/*
177 * Kernel page table page management.
178 */
179struct kpt_page {
180 struct kpt_page *kpt_next; /* link on either used or free list */
181 vm_offset_t kpt_va; /* always valid kernel VA */
182 vm_offset_t kpt_pa; /* PA of this page (for speed) */
183};
184struct kpt_page *kpt_free_list, *kpt_used_list;
185struct kpt_page *kpt_pages;
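/*
 * Kernel PT pages are allocated once in pmap_init() and threaded onto
 * kpt_free_list; pmap_collect() below returns pages whose PTEs are all
 * invalid to the free list, and in-use pages are presumably moved to
 * kpt_used_list by pmap_enter_ptpage().
 */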
186
187/*
188 * Kernel segment/page table and page table map.
189 * The page table map gives us a level of indirection we need to dynamically
190 * expand the page table. It is essentially a copy of the segment table
191 * with PTEs instead of STEs. All are initialized in locore at boot time.
192 * Sysmap will initially contain VM_KERNEL_PT_PAGES pages of PTEs.
 193 * Segtabzero is an empty segment table which all processes share until they
194 * reference something.
195 */
196st_entry_t *Sysseg;
197pt_entry_t *Sysmap, *Sysptmap;
9acfa6cd 198st_entry_t *Segtabzero, *Segtabzeropa;
8f961915 199vm_size_t Sysptsize = VM_KERNEL_PT_PAGES;
200
201struct pmap kernel_pmap_store;
202vm_map_t pt_map;
203
204vm_offset_t avail_start; /* PA of first available physical page */
205vm_offset_t avail_end; /* PA of last available physical page */
206vm_size_t mem_size; /* memory size in bytes */
207vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss)*/
208vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
209vm_offset_t vm_first_phys; /* PA of first managed page */
210vm_offset_t vm_last_phys; /* PA just past last managed page */
9acfa6cd 211#if defined(DYNPGSIZE)
8f961915 212int hppagesperpage; /* PAGE_SIZE / HP_PAGE_SIZE */
9acfa6cd 213#endif
214boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
 215int pmap_aliasmask; /* separation at which VA aliasing is ok */
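/*
 * pmap_aliasmask is non-zero only on machines with an external
 * virtually-addressed cache (VAC).  Two mappings of the same physical
 * page whose VAs differ within this mask can alias in the cache, so
 * pmap_enter() cache-inhibits such mappings and the remove/protect
 * paths flush the VAC (DCIS/DCIU/DCIA) where needed.
 */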
216char *pmap_attributes; /* reference and modify bits */
217#if defined(HP380)
218int protostfree;
219#endif
220
221boolean_t pmap_testbit();
222void pmap_enter_ptpage();
223
224/*
225 * Bootstrap memory allocator. This function allows for early dynamic
226 * memory allocation until the virtual memory system has been bootstrapped.
227 * After that point, either kmem_alloc or malloc should be used. This
228 * function works by stealing pages from the (to be) managed page pool,
229 * stealing virtual address space, then mapping the pages and zeroing them.
230 *
231 * It should be used from pmap_bootstrap till vm_page_startup, afterwards
232 * it cannot be used, and will generate a panic if tried. Note that this
233 * memory will never be freed, and in essence it is wired down.
234 */
235void *
236pmap_bootstrap_alloc(size) {
237 vm_offset_t val;
238 int i;
239 extern boolean_t vm_page_startup_initialized;
240
241 if (vm_page_startup_initialized)
242 panic("pmap_bootstrap_alloc: called after startup initialized");
243 size = round_page(size);
244 val = virtual_avail;
245
246 virtual_avail = pmap_map(virtual_avail, avail_start,
247 avail_start + size, VM_PROT_READ|VM_PROT_WRITE);
248 avail_start += size;
249
250 blkclr ((caddr_t) val, size);
251 return ((void *) val);
252}
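/*
 * A hypothetical early-startup caller (before vm_page_startup) might
 * look like:
 *
 *	sometable = (struct foo *)
 *	    pmap_bootstrap_alloc(nitems * sizeof(struct foo));
 *
 * where `sometable' and `struct foo' are placeholders; the memory
 * returned is already mapped, zeroed and permanently wired.
 */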
253
254/*
255 * Initialize the pmap module.
256 * Called by vm_init, to initialize any structures that the pmap
257 * system needs to map virtual memory.
258 */
259void
260pmap_init(phys_start, phys_end)
261 vm_offset_t phys_start, phys_end;
262{
263 vm_offset_t addr, addr2;
264 vm_size_t npg, s;
265 int rv;
a1af79ae 266 extern char kstack[];
267
268#ifdef DEBUG
269 if (pmapdebug & PDB_FOLLOW)
270 printf("pmap_init(%x, %x)\n", phys_start, phys_end);
271#endif
272 /*
273 * Now that kernel map has been allocated, we can mark as
274 * unavailable regions which we have mapped in locore.
275 */
4bc66f7c 276 addr = (vm_offset_t) intiobase;
2059b854 277 (void) vm_map_find(kernel_map, NULL, (vm_offset_t) 0,
278 &addr, hp300_ptob(IIOMAPSIZE+EIOMAPSIZE), FALSE);
279 if (addr != (vm_offset_t)intiobase)
280 goto bogons;
281 addr = (vm_offset_t) Sysmap;
282 vm_object_reference(kernel_object);
283 (void) vm_map_find(kernel_map, kernel_object, addr,
284 &addr, HP_MAX_PTSIZE, FALSE);
285 /*
286 * If this fails it is probably because the static portion of
287 * the kernel page table isn't big enough and we overran the
288 * page table map. Need to adjust pmap_size() in hp300_init.c.
289 */
290 if (addr != (vm_offset_t)Sysmap)
291 goto bogons;
292
a1af79ae 293 addr = (vm_offset_t) kstack;
294 vm_object_reference(kernel_object);
295 (void) vm_map_find(kernel_map, kernel_object, addr,
296 &addr, hp300_ptob(UPAGES), FALSE);
a1af79ae 297 if (addr != (vm_offset_t)kstack)
298bogons:
299 panic("pmap_init: bogons in the VM system!\n");
300
301#ifdef DEBUG
302 if (pmapdebug & PDB_INIT) {
303 printf("pmap_init: Sysseg %x, Sysmap %x, Sysptmap %x\n",
304 Sysseg, Sysmap, Sysptmap);
305 printf(" pstart %x, pend %x, vstart %x, vend %x\n",
306 avail_start, avail_end, virtual_avail, virtual_end);
307 }
308#endif
309
310 /*
311 * Allocate memory for random pmap data structures. Includes the
312 * initial segment table, pv_head_table and pmap_attributes.
313 */
314 npg = atop(phys_end - phys_start);
315 s = (vm_size_t) (HP_STSIZE + sizeof(struct pv_entry) * npg + npg);
316 s = round_page(s);
317 addr = (vm_offset_t) kmem_alloc(kernel_map, s);
318 Segtabzero = (st_entry_t *) addr;
9acfa6cd 319 Segtabzeropa = (st_entry_t *) pmap_extract(kernel_pmap, addr);
320 addr += HP_STSIZE;
321 pv_table = (pv_entry_t) addr;
322 addr += sizeof(struct pv_entry) * npg;
323 pmap_attributes = (char *) addr;
324#ifdef DEBUG
325 if (pmapdebug & PDB_INIT)
326 printf("pmap_init: %x bytes: npg %x s0 %x(%x) tbl %x atr %x\n",
327 s, npg, Segtabzero, Segtabzeropa,
328 pv_table, pmap_attributes);
329#endif
330
331 /*
332 * Allocate physical memory for kernel PT pages and their management.
333 * We need 1 PT page per possible task plus some slop.
334 */
2059b854 335 npg = min(atop(HP_MAX_KPTSIZE), maxproc+16);
336 s = ptoa(npg) + round_page(npg * sizeof(struct kpt_page));
337
338 /*
339 * Verify that space will be allocated in region for which
340 * we already have kernel PT pages.
341 */
342 addr = 0;
2059b854 343 rv = vm_map_find(kernel_map, NULL, 0, &addr, s, TRUE);
344 if (rv != KERN_SUCCESS || addr + s >= (vm_offset_t)Sysmap)
345 panic("pmap_init: kernel PT too small");
346 vm_map_remove(kernel_map, addr, addr + s);
347
348 /*
349 * Now allocate the space and link the pages together to
350 * form the KPT free list.
351 */
352 addr = (vm_offset_t) kmem_alloc(kernel_map, s);
353 s = ptoa(npg);
354 addr2 = addr + s;
355 kpt_pages = &((struct kpt_page *)addr2)[npg];
356 kpt_free_list = (struct kpt_page *) 0;
357 do {
358 addr2 -= HP_PAGE_SIZE;
359 (--kpt_pages)->kpt_next = kpt_free_list;
360 kpt_free_list = kpt_pages;
361 kpt_pages->kpt_va = addr2;
362 kpt_pages->kpt_pa = pmap_extract(kernel_pmap, addr2);
363 } while (addr != addr2);
364#ifdef DEBUG
365 kpt_stats.kpttotal = atop(s);
366 if (pmapdebug & PDB_INIT)
367 printf("pmap_init: KPT: %d pages from %x to %x\n",
368 atop(s), addr, addr + s);
369#endif
370
371 /*
372 * Slightly modified version of kmem_suballoc() to get page table
373 * map where we want it.
374 */
375 addr = HP_PTBASE;
2059b854 376 s = min(HP_PTMAXSIZE, maxproc*HP_MAX_PTSIZE);
8f961915 377 addr2 = addr + s;
2059b854 378 rv = vm_map_find(kernel_map, NULL, 0, &addr, s, TRUE);
379 if (rv != KERN_SUCCESS)
380 panic("pmap_init: cannot allocate space for PT map");
381 pmap_reference(vm_map_pmap(kernel_map));
382 pt_map = vm_map_create(vm_map_pmap(kernel_map), addr, addr2, TRUE);
2059b854 383 if (pt_map == NULL)
384 panic("pmap_init: cannot create pt_map");
385 rv = vm_map_submap(kernel_map, addr, addr2, pt_map);
386 if (rv != KERN_SUCCESS)
387 panic("pmap_init: cannot map range to pt_map");
388#ifdef DEBUG
389 if (pmapdebug & PDB_INIT)
390 printf("pmap_init: pt_map [%x - %x)\n", addr, addr2);
391#endif
392
393#if defined(HP380)
394 if (mmutype == MMU_68040) {
395 protostfree = ~l2tobm(0);
396 for (rv = MAXUL2SIZE; rv < sizeof(protostfree)*NBBY; rv++)
397 protostfree &= ~l2tobm(rv);
398 }
399#endif
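	/*
	 * protostfree is the prototype segment-table-free bitmap handed
	 * to new 68040 pmaps: each set bit marks a free level-2
	 * descriptor block.  Block 0 and blocks at or above MAXUL2SIZE
	 * are left marked in-use so they are never handed out.
	 */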
400
401 /*
402 * Now it is safe to enable pv_table recording.
403 */
404 vm_first_phys = phys_start;
405 vm_last_phys = phys_end;
406 pmap_initialized = TRUE;
407}
408
409/*
410 * Used to map a range of physical addresses into kernel
411 * virtual address space.
412 *
413 * For now, VM is already on, we only need to map the
414 * specified memory.
415 */
416vm_offset_t
417pmap_map(virt, start, end, prot)
418 vm_offset_t virt;
419 vm_offset_t start;
420 vm_offset_t end;
421 int prot;
422{
423#ifdef DEBUG
424 if (pmapdebug & PDB_FOLLOW)
425 printf("pmap_map(%x, %x, %x, %x)\n", virt, start, end, prot);
426#endif
427 while (start < end) {
428 pmap_enter(kernel_pmap, virt, start, prot, FALSE);
429 virt += PAGE_SIZE;
430 start += PAGE_SIZE;
431 }
432 return(virt);
433}
434
435/*
436 * Create and return a physical map.
437 *
438 * If the size specified for the map
439 * is zero, the map is an actual physical
440 * map, and may be referenced by the
441 * hardware.
442 *
443 * If the size specified is non-zero,
444 * the map will be used in software only, and
445 * is bounded by that size.
446 */
447pmap_t
448pmap_create(size)
449 vm_size_t size;
450{
451 register pmap_t pmap;
452
453#ifdef DEBUG
454 if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
455 printf("pmap_create(%x)\n", size);
456#endif
457 /*
458 * Software use map does not need a pmap
459 */
460 if (size)
2059b854 461 return(NULL);
462
463 /* XXX: is it ok to wait here? */
464 pmap = (pmap_t) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK);
465#ifdef notifwewait
466 if (pmap == NULL)
8f961915 467 panic("pmap_create: cannot allocate a pmap");
468#endif
469 bzero(pmap, sizeof(*pmap));
470 pmap_pinit(pmap);
471 return (pmap);
472}
8f961915 473
474/*
475 * Initialize a preallocated and zeroed pmap structure,
476 * such as one in a vmspace structure.
477 */
478void
479pmap_pinit(pmap)
480 register struct pmap *pmap;
481{
482
483#ifdef DEBUG
484 if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
485 printf("pmap_pinit(%x)\n", pmap);
486#endif
487 /*
488 * No need to allocate page table space yet but we do need a
489 * valid segment table. Initially, we point everyone at the
490 * "null" segment table. On the first pmap_enter, a real
491 * segment table will be allocated.
492 */
8f961915 493 pmap->pm_stab = Segtabzero;
494 pmap->pm_stpa = Segtabzeropa;
495#if defined(HP380)
496 if (mmutype == MMU_68040)
497 pmap->pm_stfree = protostfree;
498#endif
8f961915 499 pmap->pm_stchanged = TRUE;
500 pmap->pm_count = 1;
501 simple_lock_init(&pmap->pm_lock);
502}
503
504/*
505 * Retire the given physical map from service.
506 * Should only be called if the map contains
507 * no valid mappings.
508 */
509void
510pmap_destroy(pmap)
511 register pmap_t pmap;
512{
513 int count;
514
515#ifdef DEBUG
516 if (pmapdebug & PDB_FOLLOW)
517 printf("pmap_destroy(%x)\n", pmap);
518#endif
2059b854 519 if (pmap == NULL)
520 return;
521
522 simple_lock(&pmap->pm_lock);
523 count = --pmap->pm_count;
524 simple_unlock(&pmap->pm_lock);
525 if (count == 0) {
526 pmap_release(pmap);
527 free((caddr_t)pmap, M_VMPMAP);
528 }
529}
8f961915 530
531/*
532 * Release any resources held by the given physical map.
533 * Called when a pmap initialized by pmap_pinit is being released.
534 * Should only be called if the map contains no valid mappings.
535 */
536void
537pmap_release(pmap)
538 register struct pmap *pmap;
539{
540
541#ifdef DEBUG
542 if (pmapdebug & PDB_FOLLOW)
543 printf("pmap_release(%x)\n", pmap);
544#endif
545#ifdef notdef /* DIAGNOSTIC */
546 /* count would be 0 from pmap_destroy... */
547 simple_lock(&pmap->pm_lock);
548 if (pmap->pm_count != 1)
549 panic("pmap_release count");
550#endif
551 if (pmap->pm_ptab)
552 kmem_free_wakeup(pt_map, (vm_offset_t)pmap->pm_ptab,
553 HP_MAX_PTSIZE);
554 if (pmap->pm_stab != Segtabzero)
555 kmem_free(kernel_map, (vm_offset_t)pmap->pm_stab, HP_STSIZE);
556}
557
558/*
559 * Add a reference to the specified pmap.
560 */
561void
562pmap_reference(pmap)
563 pmap_t pmap;
564{
565#ifdef DEBUG
566 if (pmapdebug & PDB_FOLLOW)
567 printf("pmap_reference(%x)\n", pmap);
568#endif
2059b854 569 if (pmap != NULL) {
570 simple_lock(&pmap->pm_lock);
571 pmap->pm_count++;
572 simple_unlock(&pmap->pm_lock);
573 }
574}
575
576/*
577 * Remove the given range of addresses from the specified map.
578 *
579 * It is assumed that the start and end are properly
580 * rounded to the page size.
581 */
582void
583pmap_remove(pmap, sva, eva)
584 register pmap_t pmap;
585 vm_offset_t sva, eva;
586{
587 register vm_offset_t pa, va;
588 register pt_entry_t *pte;
589 register pv_entry_t pv, npv;
590 pmap_t ptpmap;
591 int *ste, s, bits;
592 boolean_t firstpage = TRUE;
593 boolean_t flushcache = FALSE;
594#ifdef DEBUG
595 pt_entry_t opte;
596
597 if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
598 printf("pmap_remove(%x, %x, %x)\n", pmap, sva, eva);
599#endif
600
2059b854 601 if (pmap == NULL)
602 return;
603
604#ifdef DEBUG
605 remove_stats.calls++;
606#endif
607 for (va = sva; va < eva; va += PAGE_SIZE) {
608 /*
609 * Weed out invalid mappings.
610 * Note: we assume that the segment table is always allocated.
611 */
9acfa6cd 612 if (!pmap_ste_v(pmap, va)) {
8f961915 613 /* XXX: avoid address wrap around */
9acfa6cd 614 if (va >= hp300_trunc_seg(VM_MAX_ADDRESS))
615 break;
616 va = hp300_round_seg(va + PAGE_SIZE) - PAGE_SIZE;
617 continue;
618 }
619 pte = pmap_pte(pmap, va);
620 pa = pmap_pte_pa(pte);
621 if (pa == 0)
622 continue;
623 /*
624 * Invalidating a non-CI page, must flush external VAC
625 * unless it is a supervisor mapping and we have already
626 * flushed the supervisor side.
627 */
628 if (pmap_aliasmask && !pmap_pte_ci(pte) &&
629 !(pmap == kernel_pmap && firstpage))
630 flushcache = TRUE;
631#ifdef DEBUG
632 opte = *pte;
633 remove_stats.removes++;
634#endif
635 /*
636 * Update statistics
637 */
638 if (pmap_pte_w(pte))
639 pmap->pm_stats.wired_count--;
640 pmap->pm_stats.resident_count--;
641
642 /*
643 * Invalidate the PTEs.
644 * XXX: should cluster them up and invalidate as many
645 * as possible at once.
646 */
647#ifdef DEBUG
648 if (pmapdebug & PDB_REMOVE)
9acfa6cd 649 printf("remove: invalidating pte at %x\n", pte);
650#endif
651 /*
652 * Flush VAC to ensure we get the correct state of any
653 * hardware maintained bits.
654 */
655 if (firstpage && pmap_aliasmask) {
656 firstpage = FALSE;
657 if (pmap == kernel_pmap)
658 flushcache = FALSE;
659 DCIS();
660#ifdef DEBUG
661 remove_stats.sflushes++;
662#endif
663 }
664#if defined(DYNPGSIZE)
665 {
666 register int ix = 0;
8f961915 667
668 bits = 0;
669 do {
670 bits |= *(int *)pte & (PG_U|PG_M);
671 *(int *)pte++ = PG_NV;
672 TBIS(va + ix * HP_PAGE_SIZE);
673 } while (++ix != hppagesperpage);
674 }
675#else
676 bits = *(int *)pte & (PG_U|PG_M);
677 *(int *)pte = PG_NV;
678 TBIS(va);
679#endif
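		/*
		 * `bits' now holds the hardware U/M state of the PTE(s)
		 * just invalidated; for managed pages it is merged into
		 * pmap_attributes[] below so reference/modify history
		 * survives the unmapping.
		 */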
680 /*
681 * For user mappings decrement the wiring count on
682 * the PT page. We do this after the PTE has been
683 * invalidated because vm_map_pageable winds up in
684 * pmap_pageable which clears the modify bit for the
685 * PT page.
686 */
687 if (pmap != kernel_pmap) {
688 pte = pmap_pte(pmap, va);
689 vm_map_pageable(pt_map, trunc_page(pte),
690 round_page(pte+1), TRUE);
691#ifdef DEBUG
692 if (pmapdebug & PDB_WIRING)
693 pmap_check_wiring("remove", trunc_page(pte));
694#endif
695 }
696 /*
697 * Remove from the PV table (raise IPL since we
698 * may be called at interrupt time).
699 */
700 if (pa < vm_first_phys || pa >= vm_last_phys)
701 continue;
702 pv = pa_to_pvh(pa);
703 ste = (int *)0;
704 s = splimp();
705 /*
706 * If it is the first entry on the list, it is actually
707 * in the header and we must copy the following entry up
708 * to the header. Otherwise we must search the list for
709 * the entry. In either case we free the now unused entry.
710 */
711 if (pmap == pv->pv_pmap && va == pv->pv_va) {
712 ste = (int *)pv->pv_ptste;
713 ptpmap = pv->pv_ptpmap;
714 npv = pv->pv_next;
715 if (npv) {
716 *pv = *npv;
717 free((caddr_t)npv, M_VMPVENT);
718 } else
2059b854 719 pv->pv_pmap = NULL;
720#ifdef DEBUG
721 remove_stats.pvfirst++;
722#endif
723 } else {
724 for (npv = pv->pv_next; npv; npv = npv->pv_next) {
725#ifdef DEBUG
726 remove_stats.pvsearch++;
727#endif
728 if (pmap == npv->pv_pmap && va == npv->pv_va)
729 break;
730 pv = npv;
731 }
732#ifdef DEBUG
2059b854 733 if (npv == NULL)
734 panic("pmap_remove: PA not in pv_tab");
735#endif
736 ste = (int *)npv->pv_ptste;
737 ptpmap = npv->pv_ptpmap;
738 pv->pv_next = npv->pv_next;
739 free((caddr_t)npv, M_VMPVENT);
740 pv = pa_to_pvh(pa);
741 }
742 /*
743 * If only one mapping left we no longer need to cache inhibit
744 */
745 if (pv->pv_pmap &&
2059b854 746 pv->pv_next == NULL && (pv->pv_flags & PV_CI)) {
747#ifdef DEBUG
748 if (pmapdebug & PDB_CACHE)
749 printf("remove: clearing CI for pa %x\n", pa);
750#endif
751 pv->pv_flags &= ~PV_CI;
752 pmap_changebit(pa, PG_CI, FALSE);
753#ifdef DEBUG
754 if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) ==
755 (PDB_CACHE|PDB_PVDUMP))
756 pmap_pvdump(pa);
757#endif
758 }
759
760 /*
761 * If this was a PT page we must also remove the
762 * mapping from the associated segment table.
763 */
764 if (ste) {
765#ifdef DEBUG
766 remove_stats.ptinvalid++;
767 if (pmapdebug & (PDB_REMOVE|PDB_PTPAGE)) {
768 printf("remove: ste was %x@%x pte was %x@%x\n",
769 *ste, ste,
770 *(int *)&opte, pmap_pte(pmap, va));
771 }
772#endif
773#if defined(HP380)
774 if (mmutype == MMU_68040) {
775 int *este = &ste[NPTEPG/SG4_LEV3SIZE];
776
777 while (ste < este)
778 *ste++ = SG_NV;
779 } else
780#endif
781 *ste = SG_NV;
782 /*
783 * If it was a user PT page, we decrement the
784 * reference count on the segment table as well,
785 * freeing it if it is now empty.
786 */
787 if (ptpmap != kernel_pmap) {
788#ifdef DEBUG
789 if (pmapdebug & (PDB_REMOVE|PDB_SEGTAB))
790 printf("remove: stab %x, refcnt %d\n",
791 ptpmap->pm_stab,
792 ptpmap->pm_sref - 1);
793 if ((pmapdebug & PDB_PARANOIA) &&
2059b854 794 ptpmap->pm_stab != (st_entry_t *)trunc_page(ste))
795 panic("remove: bogus ste");
796#endif
797 if (--(ptpmap->pm_sref) == 0) {
798#ifdef DEBUG
799 if (pmapdebug&(PDB_REMOVE|PDB_SEGTAB))
800 printf("remove: free stab %x\n",
801 ptpmap->pm_stab);
802#endif
803 kmem_free(kernel_map,
804 (vm_offset_t)ptpmap->pm_stab,
805 HP_STSIZE);
806 ptpmap->pm_stab = Segtabzero;
807 ptpmap->pm_stpa = Segtabzeropa;
808#if defined(HP380)
809 if (mmutype == MMU_68040)
810 ptpmap->pm_stfree = protostfree;
811#endif
812 ptpmap->pm_stchanged = TRUE;
813 /*
814 * XXX may have changed segment table
815 * pointer for current process so
816 * update now to reload hardware.
817 */
2059b854 818 if (ptpmap == curproc->p_vmspace->vm_map.pmap)
8f961915 819 PMAP_ACTIVATE(ptpmap,
2059b854 820 (struct pcb *)curproc->p_addr, 1);
821 }
822 }
823 if (ptpmap == kernel_pmap)
824 TBIAS();
825 else
826 TBIAU();
827 pv->pv_flags &= ~PV_PTPAGE;
828 ptpmap->pm_ptpages--;
829 }
830 /*
831 * Update saved attributes for managed page
832 */
833 pmap_attributes[pa_index(pa)] |= bits;
834 splx(s);
835 }
836#ifdef DEBUG
837 if (pmapvacflush & PVF_REMOVE) {
838 if (pmapvacflush & PVF_TOTAL)
839 DCIA();
840 else if (pmap == kernel_pmap)
841 DCIS();
842 else
843 DCIU();
844 }
845#endif
846 if (flushcache) {
847 if (pmap == kernel_pmap) {
848 DCIS();
849#ifdef DEBUG
850 remove_stats.sflushes++;
851#endif
852 } else {
853 DCIU();
854#ifdef DEBUG
855 remove_stats.uflushes++;
856#endif
857 }
858 }
859}
860
861/*
862 * pmap_page_protect:
863 *
864 * Lower the permission for all mappings to a given page.
865 */
866void
867pmap_page_protect(pa, prot)
868 vm_offset_t pa;
869 vm_prot_t prot;
870{
871 register pv_entry_t pv;
872 int s;
873
874#ifdef DEBUG
875 if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) ||
876 prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE))
877 printf("pmap_page_protect(%x, %x)\n", pa, prot);
8f961915 878#endif
879 if (pa < vm_first_phys || pa >= vm_last_phys)
880 return;
881
882 switch (prot) {
883 case VM_PROT_ALL:
884 break;
885 /* copy_on_write */
886 case VM_PROT_READ:
887 case VM_PROT_READ|VM_PROT_EXECUTE:
888 pmap_changebit(pa, PG_RO, TRUE);
889 break;
890 /* remove_all */
891 default:
892 pv = pa_to_pvh(pa);
893 s = splimp();
894 while (pv->pv_pmap != NULL) {
8f961915 895#ifdef DEBUG
9acfa6cd 896 if (!pmap_ste_v(pv->pv_pmap, pv->pv_va) ||
897 pmap_pte_pa(pmap_pte(pv->pv_pmap,pv->pv_va)) != pa)
898 panic("pmap_page_protect: bad mapping");
8f961915 899#endif
900 pmap_remove(pv->pv_pmap, pv->pv_va,
901 pv->pv_va + PAGE_SIZE);
902 }
903 splx(s);
904 break;
8f961915 905 }
906}
907
908/*
909 * Set the physical protection on the
910 * specified range of this map as requested.
911 */
912void
913pmap_protect(pmap, sva, eva, prot)
914 register pmap_t pmap;
915 vm_offset_t sva, eva;
916 vm_prot_t prot;
917{
918 register pt_entry_t *pte;
919 register vm_offset_t va;
920 int hpprot;
921 boolean_t firstpage = TRUE;
922
923#ifdef DEBUG
924 if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
925 printf("pmap_protect(%x, %x, %x, %x)\n", pmap, sva, eva, prot);
9acfa6cd 926 protect_stats.calls++;
8f961915 927#endif
2059b854 928 if (pmap == NULL)
929 return;
930
931 if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
932 pmap_remove(pmap, sva, eva);
933 return;
934 }
935 if (prot & VM_PROT_WRITE)
936 return;
937
938 pte = pmap_pte(pmap, sva);
939 hpprot = pte_prot(pmap, prot) == PG_RO ? 1 : 0;
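	/*
	 * hpprot is 1 when the requested protection maps to read-only,
	 * 0 when the pages may remain read/write; it is written directly
	 * into each PTE's protection bit in the loop below.
	 */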
940 for (va = sva; va < eva; va += PAGE_SIZE) {
941 /*
942 * Page table page is not allocated.
943 * Skip it, we don't want to force allocation
944 * of unnecessary PTE pages just to set the protection.
945 */
9acfa6cd 946 if (!pmap_ste_v(pmap, va)) {
947 /* XXX: avoid address wrap around */
948 if (va >= hp300_trunc_seg((vm_offset_t)-1))
949 break;
950 va = hp300_round_seg(va + PAGE_SIZE) - PAGE_SIZE;
951#if defined(DYNPGSIZE)
952 pte = pmap_pte(pmap, va) + hppagesperpage;
953#else
954 pte = pmap_pte(pmap, va) + 1;
955#endif
956 continue;
957 }
958 /*
959 * Page not valid. Again, skip it.
960 * Should we do this? Or set protection anyway?
961 */
962 if (!pmap_pte_v(pte)) {
9acfa6cd 963#if defined(DYNPGSIZE)
8f961915 964 pte += hppagesperpage;
965#else
966 pte++;
967#endif
968 continue;
969 }
970 /*
971 * Purge kernel side of VAC to ensure we get correct state
972 * of HW bits so we don't clobber them.
973 */
974 if (firstpage && pmap_aliasmask) {
975 firstpage = FALSE;
976 DCIS();
977 }
978#if defined(DYNPGSIZE)
979 {
980 register int ix = 0;
981
982 do {
983 /*
984 * Clear caches as necessary if making RO.
985 * XXX clear VAC? Doesn't seem to be needed.
986 */
987#if defined(HP380)
988 if (hpprot && !pmap_pte_prot(pte)) {
989 vm_offset_t pa = pmap_pte_pa(pte);
990
991 if (mmutype == MMU_68040) {
992 DCFP(pa);
993 ICPP(pa);
994 }
995 }
996#endif
997#ifdef DEBUG
998 protect_stats.pages++;
999 if (hpprot && pmap_pte_prot(pte))
1000 protect_stats.alreadyro++;
1001 if (!hpprot && !pmap_pte_prot(pte))
1002 protect_stats.alreadyrw++;
1003#endif
1004 pmap_pte_set_prot(pte++, hpprot);
1005 TBIS(va + ix * HP_PAGE_SIZE);
1006 } while (++ix != hppagesperpage);
1007 }
1008#else
1009 /*
1010 * Clear caches as necessary if making RO.
1011 * XXX clear VAC? Doesn't seem to be needed.
1012 */
1013#if defined(HP380)
1014 if (hpprot && !pmap_pte_prot(pte)) {
1015 vm_offset_t pa = pmap_pte_pa(pte);
1016
1017 if (mmutype == MMU_68040) {
1018 DCFP(pa);
1019 ICPP(pa);
1020 }
1021 }
1022#endif
1023#ifdef DEBUG
1024 protect_stats.pages++;
1025 if (hpprot && pmap_pte_prot(pte))
1026 protect_stats.alreadyro++;
1027 if (!hpprot && !pmap_pte_prot(pte))
1028 protect_stats.alreadyrw++;
1029#endif
1030 pmap_pte_set_prot(pte++, hpprot);
1031 TBIS(va);
1032#endif
1033 }
1034#ifdef DEBUG
1035 if (hpprot && (pmapvacflush & PVF_PROTECT)) {
1036 if (pmapvacflush & PVF_TOTAL)
1037 DCIA();
1038 else if (pmap == kernel_pmap)
1039 DCIS();
1040 else
1041 DCIU();
1042 }
1043#endif
1044}
1045
1046/*
1047 * Insert the given physical page (p) at
1048 * the specified virtual address (v) in the
1049 * target physical map with the protection requested.
1050 *
1051 * If specified, the page will be wired down, meaning
1052 * that the related pte can not be reclaimed.
1053 *
1054 * NB: This is the only routine which MAY NOT lazy-evaluate
1055 * or lose information. That is, this routine must actually
1056 * insert this page into the given map NOW.
1057 */
1058void
1059pmap_enter(pmap, va, pa, prot, wired)
1060 register pmap_t pmap;
1061 vm_offset_t va;
1062 register vm_offset_t pa;
1063 vm_prot_t prot;
1064 boolean_t wired;
1065{
1066 register pt_entry_t *pte;
9acfa6cd 1067 register int npte;
1068 vm_offset_t opa;
1069 boolean_t cacheable = TRUE;
1070 boolean_t checkpv = TRUE;
1071
1072#ifdef DEBUG
1073 if (pmapdebug & (PDB_FOLLOW|PDB_ENTER))
1074 printf("pmap_enter(%x, %x, %x, %x, %x)\n",
1075 pmap, va, pa, prot, wired);
1076#endif
2059b854 1077 if (pmap == NULL)
1078 return;
1079
1080#ifdef DEBUG
1081 if (pmap == kernel_pmap)
1082 enter_stats.kernel++;
1083 else
1084 enter_stats.user++;
1085#endif
1086 /*
1087 * For user mapping, allocate kernel VM resources if necessary.
1088 */
2059b854 1089 if (pmap->pm_ptab == NULL)
1090 pmap->pm_ptab = (pt_entry_t *)
1091 kmem_alloc_wait(pt_map, HP_MAX_PTSIZE);
1092
1093 /*
1094 * Segment table entry not valid, we need a new PT page
1095 */
9acfa6cd 1096 if (!pmap_ste_v(pmap, va))
1097 pmap_enter_ptpage(pmap, va);
1098
1099 pte = pmap_pte(pmap, va);
1100 opa = pmap_pte_pa(pte);
1101#ifdef DEBUG
1102 if (pmapdebug & PDB_ENTER)
1103 printf("enter: pte %x, *pte %x\n", pte, *(int *)pte);
1104#endif
1105
1106 /*
1107 * Mapping has not changed, must be protection or wiring change.
1108 */
1109 if (opa == pa) {
1110#ifdef DEBUG
1111 enter_stats.pwchange++;
1112#endif
1113 /*
1114 * Wiring change, just update stats.
1115 * We don't worry about wiring PT pages as they remain
1116 * resident as long as there are valid mappings in them.
1117 * Hence, if a user page is wired, the PT page will be also.
1118 */
1119 if (wired && !pmap_pte_w(pte) || !wired && pmap_pte_w(pte)) {
1120#ifdef DEBUG
1121 if (pmapdebug & PDB_ENTER)
1122 printf("enter: wiring change -> %x\n", wired);
1123#endif
1124 if (wired)
1125 pmap->pm_stats.wired_count++;
1126 else
1127 pmap->pm_stats.wired_count--;
1128#ifdef DEBUG
1129 enter_stats.wchange++;
1130#endif
1131 }
1132 /*
1133 * Retain cache inhibition status
1134 */
1135 checkpv = FALSE;
1136 if (pmap_pte_ci(pte))
1137 cacheable = FALSE;
1138 goto validate;
1139 }
1140
1141 /*
1142 * Mapping has changed, invalidate old range and fall through to
1143 * handle validating new mapping.
1144 */
1145 if (opa) {
1146#ifdef DEBUG
1147 if (pmapdebug & PDB_ENTER)
1148 printf("enter: removing old mapping %x\n", va);
1149#endif
1150 pmap_remove(pmap, va, va + PAGE_SIZE);
1151#ifdef DEBUG
1152 enter_stats.mchange++;
1153#endif
1154 }
1155
1156 /*
1157 * If this is a new user mapping, increment the wiring count
1158 * on this PT page. PT pages are wired down as long as there
1159 * is a valid mapping in the page.
1160 */
1161 if (pmap != kernel_pmap)
1162 vm_map_pageable(pt_map, trunc_page(pte),
1163 round_page(pte+1), FALSE);
1164
1165 /*
1166 * Enter on the PV list if part of our managed memory
1167 * Note that we raise IPL while manipulating pv_table
1168 * since pmap_enter can be called at interrupt time.
1169 */
1170 if (pa >= vm_first_phys && pa < vm_last_phys) {
1171 register pv_entry_t pv, npv;
1172 int s;
1173
1174#ifdef DEBUG
1175 enter_stats.managed++;
1176#endif
1177 pv = pa_to_pvh(pa);
1178 s = splimp();
1179#ifdef DEBUG
1180 if (pmapdebug & PDB_ENTER)
1181 printf("enter: pv at %x: %x/%x/%x\n",
1182 pv, pv->pv_va, pv->pv_pmap, pv->pv_next);
1183#endif
1184 /*
1185 * No entries yet, use header as the first entry
1186 */
2059b854 1187 if (pv->pv_pmap == NULL) {
1188#ifdef DEBUG
1189 enter_stats.firstpv++;
1190#endif
1191 pv->pv_va = va;
1192 pv->pv_pmap = pmap;
1193 pv->pv_next = NULL;
1194 pv->pv_ptste = NULL;
1195 pv->pv_ptpmap = NULL;
1196 pv->pv_flags = 0;
1197 }
1198 /*
1199 * There is at least one other VA mapping this page.
1200 * Place this entry after the header.
1201 */
1202 else {
1203#ifdef DEBUG
1204 for (npv = pv; npv; npv = npv->pv_next)
1205 if (pmap == npv->pv_pmap && va == npv->pv_va)
1206 panic("pmap_enter: already in pv_tab");
1207#endif
1208 npv = (pv_entry_t)
1209 malloc(sizeof *npv, M_VMPVENT, M_NOWAIT);
1210 npv->pv_va = va;
1211 npv->pv_pmap = pmap;
1212 npv->pv_next = pv->pv_next;
1213 npv->pv_ptste = NULL;
1214 npv->pv_ptpmap = NULL;
1215 pv->pv_next = npv;
1216#ifdef DEBUG
1217 if (!npv->pv_next)
1218 enter_stats.secondpv++;
1219#endif
1220 /*
1221 * Since there is another logical mapping for the
1222 * same page we may need to cache-inhibit the
1223 * descriptors on those CPUs with external VACs.
1224 * We don't need to CI if:
1225 *
1226 * - No two mappings belong to the same user pmaps.
1227 * Since the cache is flushed on context switches
1228 * there is no problem between user processes.
1229 *
1230 * - Mappings within a single pmap are a certain
1231 * magic distance apart. VAs at these appropriate
1232 * boundaries map to the same cache entries or
1233 * otherwise don't conflict.
1234 *
1235 * To keep it simple, we only check for these special
1236 * cases if there are only two mappings, otherwise we
1237 * punt and always CI.
1238 *
1239 * Note that there are no aliasing problems with the
1240 * on-chip data-cache when the WA bit is set.
1241 */
1242 if (pmap_aliasmask) {
1243 if (pv->pv_flags & PV_CI) {
1244#ifdef DEBUG
1245 if (pmapdebug & PDB_CACHE)
1246 printf("enter: pa %x already CI'ed\n",
1247 pa);
1248#endif
1249 checkpv = cacheable = FALSE;
1250 } else if (npv->pv_next ||
1251 ((pmap == pv->pv_pmap ||
1252 pmap == kernel_pmap ||
1253 pv->pv_pmap == kernel_pmap) &&
1254 ((pv->pv_va & pmap_aliasmask) !=
1255 (va & pmap_aliasmask)))) {
1256#ifdef DEBUG
1257 if (pmapdebug & PDB_CACHE)
1258 printf("enter: pa %x CI'ing all\n",
1259 pa);
1260#endif
1261 cacheable = FALSE;
1262 pv->pv_flags |= PV_CI;
1263#ifdef DEBUG
1264 enter_stats.ci++;
1265#endif
1266 }
1267 }
1268 }
1269 splx(s);
1270 }
1271 /*
1272 * Assumption: if it is not part of our managed memory
 1273 * then it must be device memory which may be volatile.
1274 */
1275 else if (pmap_initialized) {
1276 checkpv = cacheable = FALSE;
1277#ifdef DEBUG
1278 enter_stats.unmanaged++;
1279#endif
1280 }
1281
1282 /*
1283 * Increment counters
1284 */
1285 pmap->pm_stats.resident_count++;
1286 if (wired)
1287 pmap->pm_stats.wired_count++;
1288
1289validate:
1290 /*
1291 * Purge kernel side of VAC to ensure we get correct state
1292 * of HW bits so we don't clobber them.
1293 */
1294 if (pmap_aliasmask)
1295 DCIS();
1296 /*
1297 * Now validate mapping with desired protection/wiring.
1298 * Assume uniform modified and referenced status for all
1299 * HP pages in a MACH page.
1300 */
1301 npte = (pa & PG_FRAME) | pte_prot(pmap, prot) | PG_V;
1302 npte |= (*(int *)pte & (PG_M|PG_U));
1303 if (wired)
1304 npte |= PG_W;
1305 if (!checkpv && !cacheable)
1306 npte |= PG_CI;
1307#if defined(HP380)
1308 if (mmutype == MMU_68040 && (npte & (PG_PROT|PG_CI)) == PG_RW)
1309#ifdef DEBUG
1310 if (dowriteback && (dokwriteback || pmap != kernel_pmap))
1311#endif
1312 npte |= PG_CCB;
1313#endif
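	/*
	 * On the 68040, writable mappings that are not cache-inhibited
	 * get copyback caching (PG_CCB); the DEBUG-only dowriteback and
	 * dokwriteback switches allow this to be disabled for user
	 * and/or kernel mappings while debugging.
	 */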
1314#ifdef DEBUG
1315 if (pmapdebug & PDB_ENTER)
1316 printf("enter: new pte value %x\n", npte);
1317#endif
1318#if defined(DYNPGSIZE)
1319 {
1320 register int ix = 0;
1321
1322 do {
1323#if defined(HP380)
1324 if (mmutype == MMU_68040) {
1325 DCFP(pa);
1326 ICPP(pa);
1327 }
1328#endif
1329 *(int *)pte++ = npte;
1330 TBIS(va);
1331 npte += HP_PAGE_SIZE;
1332 va += HP_PAGE_SIZE;
1333 } while (++ix != hppagesperpage);
1334 }
1335#else
1336#if defined(HP380)
1337 if (mmutype == MMU_68040) {
1338 DCFP(pa);
1339 ICPP(pa);
1340 }
1341#endif
1342 *(int *)pte = npte;
1343 TBIS(va);
1344#endif
1345 /*
1346 * The following is executed if we are entering a second
1347 * (or greater) mapping for a physical page and the mappings
1348 * may create an aliasing problem. In this case we must
1349 * cache inhibit the descriptors involved and flush any
1350 * external VAC.
1351 */
1352 if (checkpv && !cacheable) {
1353 pmap_changebit(pa, PG_CI, TRUE);
1354 DCIA();
1355#ifdef DEBUG
1356 enter_stats.flushes++;
1357#endif
1358#ifdef DEBUG
1359 if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) ==
1360 (PDB_CACHE|PDB_PVDUMP))
1361 pmap_pvdump(pa);
1362#endif
1363 }
1364#ifdef DEBUG
1365 else if (pmapvacflush & PVF_ENTER) {
1366 if (pmapvacflush & PVF_TOTAL)
1367 DCIA();
1368 else if (pmap == kernel_pmap)
1369 DCIS();
1370 else
1371 DCIU();
1372 }
1373 if ((pmapdebug & PDB_WIRING) && pmap != kernel_pmap) {
9acfa6cd 1374#if defined(DYNPGSIZE)
8f961915 1375 va -= PAGE_SIZE;
9acfa6cd 1376#endif
1377 pmap_check_wiring("enter", trunc_page(pmap_pte(pmap, va)));
1378 }
1379#endif
1380}
1381
1382/*
1383 * Routine: pmap_change_wiring
1384 * Function: Change the wiring attribute for a map/virtual-address
1385 * pair.
1386 * In/out conditions:
1387 * The mapping must already exist in the pmap.
1388 */
1389void
1390pmap_change_wiring(pmap, va, wired)
1391 register pmap_t pmap;
1392 vm_offset_t va;
1393 boolean_t wired;
1394{
1395 register pt_entry_t *pte;
1396
1397#ifdef DEBUG
1398 if (pmapdebug & PDB_FOLLOW)
1399 printf("pmap_change_wiring(%x, %x, %x)\n", pmap, va, wired);
1400#endif
2059b854 1401 if (pmap == NULL)
1402 return;
1403
1404 pte = pmap_pte(pmap, va);
1405#ifdef DEBUG
1406 /*
1407 * Page table page is not allocated.
1408 * Should this ever happen? Ignore it for now,
1409 * we don't want to force allocation of unnecessary PTE pages.
1410 */
9acfa6cd 1411 if (!pmap_ste_v(pmap, va)) {
1412 if (pmapdebug & PDB_PARANOIA)
1413 printf("pmap_change_wiring: invalid STE for %x\n", va);
1414 return;
1415 }
1416 /*
1417 * Page not valid. Should this ever happen?
1418 * Just continue and change wiring anyway.
1419 */
1420 if (!pmap_pte_v(pte)) {
1421 if (pmapdebug & PDB_PARANOIA)
1422 printf("pmap_change_wiring: invalid PTE for %x\n", va);
1423 }
1424#endif
1425 if (wired && !pmap_pte_w(pte) || !wired && pmap_pte_w(pte)) {
1426 if (wired)
1427 pmap->pm_stats.wired_count++;
1428 else
1429 pmap->pm_stats.wired_count--;
1430 }
1431 /*
1432 * Wiring is not a hardware characteristic so there is no need
1433 * to invalidate TLB.
1434 */
1435#if defined(DYNPGSIZE)
1436 {
1437 register int ix = 0;
1438
1439 do {
1440 pmap_pte_set_w(pte++, wired);
1441 } while (++ix != hppagesperpage);
1442 }
1443#else
1444 pmap_pte_set_w(pte, wired);
1445#endif
1446}
1447
1448/*
1449 * Routine: pmap_extract
1450 * Function:
1451 * Extract the physical page address associated
1452 * with the given map/virtual_address pair.
1453 */
1454
1455vm_offset_t
1456pmap_extract(pmap, va)
1457 register pmap_t pmap;
1458 vm_offset_t va;
1459{
1460 register vm_offset_t pa;
1461
1462#ifdef DEBUG
1463 if (pmapdebug & PDB_FOLLOW)
1464 printf("pmap_extract(%x, %x) -> ", pmap, va);
1465#endif
1466 pa = 0;
9acfa6cd 1467 if (pmap && pmap_ste_v(pmap, va))
8f961915
KM
1468 pa = *(int *)pmap_pte(pmap, va);
1469 if (pa)
1470 pa = (pa & PG_FRAME) | (va & ~PG_FRAME);
1471#ifdef DEBUG
1472 if (pmapdebug & PDB_FOLLOW)
1473 printf("%x\n", pa);
1474#endif
1475 return(pa);
1476}
1477
1478/*
1479 * Copy the range specified by src_addr/len
1480 * from the source map to the range dst_addr/len
1481 * in the destination map.
1482 *
1483 * This routine is only advisory and need not do anything.
1484 */
1485void pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
1486 pmap_t dst_pmap;
1487 pmap_t src_pmap;
1488 vm_offset_t dst_addr;
1489 vm_size_t len;
1490 vm_offset_t src_addr;
1491{
1492#ifdef DEBUG
1493 if (pmapdebug & PDB_FOLLOW)
1494 printf("pmap_copy(%x, %x, %x, %x, %x)\n",
1495 dst_pmap, src_pmap, dst_addr, len, src_addr);
1496#endif
1497}
1498
1499/*
1500 * Require that all active physical maps contain no
1501 * incorrect entries NOW. [This update includes
1502 * forcing updates of any address map caching.]
1503 *
1504 * Generally used to insure that a thread about
1505 * to run will see a semantically correct world.
1506 */
1507void pmap_update()
1508{
1509#ifdef DEBUG
1510 if (pmapdebug & PDB_FOLLOW)
1511 printf("pmap_update()\n");
1512#endif
1513 TBIA();
1514}
1515
1516/*
1517 * Routine: pmap_collect
1518 * Function:
1519 * Garbage collects the physical map system for
1520 * pages which are no longer used.
1521 * Success need not be guaranteed -- that is, there
1522 * may well be pages which are not referenced, but
1523 * others may be collected.
1524 * Usage:
1525 * Called by the pageout daemon when pages are scarce.
1526 */
1527void
1528pmap_collect(pmap)
1529 pmap_t pmap;
1530{
1531 register vm_offset_t pa;
1532 register pv_entry_t pv;
1533 register int *pte;
1534 vm_offset_t kpa;
1535 int s;
1536
1537#ifdef DEBUG
1538 int *ste;
1539 int opmapdebug;
1540#endif
1541 if (pmap != kernel_pmap)
1542 return;
1543
1544#ifdef DEBUG
1545 if (pmapdebug & PDB_FOLLOW)
1546 printf("pmap_collect(%x)\n", pmap);
1547 kpt_stats.collectscans++;
1548#endif
1549 s = splimp();
1550 for (pa = vm_first_phys; pa < vm_last_phys; pa += PAGE_SIZE) {
1551 register struct kpt_page *kpt, **pkpt;
1552
1553 /*
1554 * Locate physical pages which are being used as kernel
1555 * page table pages.
1556 */
1557 pv = pa_to_pvh(pa);
1558 if (pv->pv_pmap != kernel_pmap || !(pv->pv_flags & PV_PTPAGE))
1559 continue;
1560 do {
1561 if (pv->pv_ptste && pv->pv_ptpmap == kernel_pmap)
1562 break;
1563 } while (pv = pv->pv_next);
2059b854 1564 if (pv == NULL)
1565 continue;
1566#ifdef DEBUG
1567 if (pv->pv_va < (vm_offset_t)Sysmap ||
1568 pv->pv_va >= (vm_offset_t)Sysmap + HP_MAX_PTSIZE)
1569 printf("collect: kernel PT VA out of range\n");
1570 else
1571 goto ok;
1572 pmap_pvdump(pa);
1573 continue;
1574ok:
1575#endif
1576 pte = (int *)(pv->pv_va + HP_PAGE_SIZE);
1577 while (--pte >= (int *)pv->pv_va && *pte == PG_NV)
1578 ;
1579 if (pte >= (int *)pv->pv_va)
1580 continue;
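		/*
		 * The backwards scan above stops early only if it finds a
		 * valid (non-PG_NV) entry, in which case this KPT page is
		 * still in use and is skipped; otherwise every PTE in the
		 * page is invalid and the page is reclaimed below.
		 */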
1581
1582#ifdef DEBUG
1583 if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT)) {
1584 printf("collect: freeing KPT page at %x (ste %x@%x)\n",
1585 pv->pv_va, *(int *)pv->pv_ptste, pv->pv_ptste);
1586 opmapdebug = pmapdebug;
1587 pmapdebug |= PDB_PTPAGE;
1588 }
1589
1590 ste = (int *)pv->pv_ptste;
1591#endif
1592 /*
1593 * If all entries were invalid we can remove the page.
1594 * We call pmap_remove to take care of invalidating ST
1595 * and Sysptmap entries.
1596 */
1597 kpa = pmap_extract(pmap, pv->pv_va);
1598 pmap_remove(pmap, pv->pv_va, pv->pv_va + HP_PAGE_SIZE);
1599 /*
1600 * Use the physical address to locate the original
1601 * (kmem_alloc assigned) address for the page and put
1602 * that page back on the free list.
1603 */
1604 for (pkpt = &kpt_used_list, kpt = *pkpt;
1605 kpt != (struct kpt_page *)0;
1606 pkpt = &kpt->kpt_next, kpt = *pkpt)
1607 if (kpt->kpt_pa == kpa)
1608 break;
1609#ifdef DEBUG
1610 if (kpt == (struct kpt_page *)0)
1611 panic("pmap_collect: lost a KPT page");
1612 if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
1613 printf("collect: %x (%x) to free list\n",
1614 kpt->kpt_va, kpa);
1615#endif
1616 *pkpt = kpt->kpt_next;
1617 kpt->kpt_next = kpt_free_list;
1618 kpt_free_list = kpt;
1619#ifdef DEBUG
1620 kpt_stats.kptinuse--;
1621 kpt_stats.collectpages++;
1622 if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
1623 pmapdebug = opmapdebug;
1624
1625 if (*ste)
1626 printf("collect: kernel STE at %x still valid (%x)\n",
1627 ste, *ste);
1628 ste = (int *)&Sysptmap[(st_entry_t *)ste-pmap_ste(kernel_pmap, 0)];
1629 if (*ste)
1630 printf("collect: kernel PTmap at %x still valid (%x)\n",
1631 ste, *ste);
1632#endif
1633 }
1634 splx(s);
1635}
1636
1637void
1638pmap_activate(pmap, pcbp)
1639 register pmap_t pmap;
1640 struct pcb *pcbp;
1641{
1642#ifdef DEBUG
1643 if (pmapdebug & (PDB_FOLLOW|PDB_SEGTAB))
1644 printf("pmap_activate(%x, %x)\n", pmap, pcbp);
1645#endif
2059b854 1646 PMAP_ACTIVATE(pmap, pcbp, pmap == curproc->p_vmspace->vm_map.pmap);
1647}
1648
1649/*
1650 * pmap_zero_page zeros the specified (machine independent)
1651 * page by mapping the page into virtual memory and using
1652 * bzero to clear its contents, one machine dependent page
1653 * at a time.
1654 *
1655 * XXX this is a bad implementation for virtual cache machines
1656 * (320/350) because pmap_enter doesn't cache-inhibit the temporary
1657 * kernel mapping and we wind up with data cached for that KVA.
1658 * It is probably a win for physical cache machines (370/380)
1659 * as the cache loading is not wasted.
8f961915 1660 */
21b1e496 1661void
8f961915 1662pmap_zero_page(phys)
9acfa6cd 1663 vm_offset_t phys;
8f961915 1664{
1665 register vm_offset_t kva;
1666 extern caddr_t CADDR1;
1667
1668#ifdef DEBUG
1669 if (pmapdebug & PDB_FOLLOW)
1670 printf("pmap_zero_page(%x)\n", phys);
1671#endif
1672 kva = (vm_offset_t) CADDR1;
1673#if defined(DYNPGSIZE)
1674 {
1675 register int ix = 0;
1676
1677 do {
1678 pmap_enter(kernel_pmap, kva, phys,
1679 VM_PROT_READ|VM_PROT_WRITE, TRUE);
1680 bzero((caddr_t)kva, HP_PAGE_SIZE);
1681 pmap_remove(kernel_pmap, kva, kva+HP_PAGE_SIZE);
1682 phys += HP_PAGE_SIZE;
1683 } while (++ix != hppagesperpage);
1684 }
1685#else
1686 pmap_enter(kernel_pmap, kva, phys, VM_PROT_READ|VM_PROT_WRITE, TRUE);
1687 bzero((caddr_t)kva, HP_PAGE_SIZE);
1688 pmap_remove(kernel_pmap, kva, kva+PAGE_SIZE);
1689#endif
1690}
1691
1692/*
1693 * pmap_copy_page copies the specified (machine independent)
1694 * page by mapping the page into virtual memory and using
1695 * bcopy to copy the page, one machine dependent page at a
1696 * time.
1697 *
1698 *
1699 * XXX this is a bad implementation for virtual cache machines
1700 * (320/350) because pmap_enter doesn't cache-inhibit the temporary
1701 * kernel mapping and we wind up with data cached for that KVA.
1702 * It is probably a win for physical cache machines (370/380)
1703 * as the cache loading is not wasted.
8f961915 1704 */
21b1e496 1705void
8f961915 1706pmap_copy_page(src, dst)
9acfa6cd 1707 vm_offset_t src, dst;
8f961915 1708{
1709 register vm_offset_t skva, dkva;
1710 extern caddr_t CADDR1, CADDR2;
1711
1712#ifdef DEBUG
1713 if (pmapdebug & PDB_FOLLOW)
1714 printf("pmap_copy_page(%x, %x)\n", src, dst);
1715#endif
1716 skva = (vm_offset_t) CADDR1;
1717 dkva = (vm_offset_t) CADDR2;
1718#if defined(DYNPGSIZE)
1719 {
1720 register int ix = 0;
8f961915 1721
1722 do {
1723 pmap_enter(kernel_pmap, skva, src, VM_PROT_READ, TRUE);
1724 pmap_enter(kernel_pmap, dkva, dst,
1725 VM_PROT_READ|VM_PROT_WRITE, TRUE);
1726 bcopy((caddr_t)skva, (caddr_t)dkva, PAGE_SIZE);
1727 /* CADDR1 and CADDR2 are virtually contiguous */
1728 pmap_remove(kernel_pmap, skva, skva+2*HP_PAGE_SIZE);
1729 src += HP_PAGE_SIZE;
1730 dst += HP_PAGE_SIZE;
1731 } while (++ix != hppagesperpage);
1732 }
1733#else
1734 pmap_enter(kernel_pmap, skva, src, VM_PROT_READ, TRUE);
1735 pmap_enter(kernel_pmap, dkva, dst, VM_PROT_READ|VM_PROT_WRITE, TRUE);
1736 bcopy((caddr_t)skva, (caddr_t)dkva, PAGE_SIZE);
1737 /* CADDR1 and CADDR2 are virtually contiguous */
1738 pmap_remove(kernel_pmap, skva, skva+2*PAGE_SIZE);
1739#endif
1740}
1741
1742/*
1743 * Routine: pmap_pageable
1744 * Function:
1745 * Make the specified pages (by pmap, offset)
1746 * pageable (or not) as requested.
1747 *
1748 * A page which is not pageable may not take
1749 * a fault; therefore, its page table entry
1750 * must remain valid for the duration.
1751 *
1752 * This routine is merely advisory; pmap_enter
1753 * will specify that these pages are to be wired
1754 * down (or not) as appropriate.
1755 */
21b1e496 1756void
1757pmap_pageable(pmap, sva, eva, pageable)
1758 pmap_t pmap;
1759 vm_offset_t sva, eva;
1760 boolean_t pageable;
1761{
1762#ifdef DEBUG
1763 if (pmapdebug & PDB_FOLLOW)
1764 printf("pmap_pageable(%x, %x, %x, %x)\n",
1765 pmap, sva, eva, pageable);
1766#endif
1767 /*
1768 * If we are making a PT page pageable then all valid
1769 * mappings must be gone from that page. Hence it should
1770 * be all zeros and there is no need to clean it.
1771 * Assumptions:
1772 * - we are called with only one page at a time
1773 * - PT pages have only one pv_table entry
1774 */
1775 if (pmap == kernel_pmap && pageable && sva + PAGE_SIZE == eva) {
1776 register pv_entry_t pv;
1777 register vm_offset_t pa;
1778
1779#ifdef DEBUG
1780 if ((pmapdebug & (PDB_FOLLOW|PDB_PTPAGE)) == PDB_PTPAGE)
1781 printf("pmap_pageable(%x, %x, %x, %x)\n",
1782 pmap, sva, eva, pageable);
1783#endif
9acfa6cd 1784 if (!pmap_ste_v(pmap, sva))
1785 return;
1786 pa = pmap_pte_pa(pmap_pte(pmap, sva));
1787 if (pa < vm_first_phys || pa >= vm_last_phys)
1788 return;
1789 pv = pa_to_pvh(pa);
2059b854 1790 if (pv->pv_ptste == NULL)
1791 return;
1792#ifdef DEBUG
1793 if (pv->pv_va != sva || pv->pv_next) {
1794 printf("pmap_pageable: bad PT page va %x next %x\n",
1795 pv->pv_va, pv->pv_next);
1796 return;
1797 }
1798#endif
1799 /*
1800 * Mark it unmodified to avoid pageout
1801 */
2059b854 1802 pmap_changebit(pa, PG_M, FALSE);
1803#ifdef DEBUG
1804 if (pmapdebug & PDB_PTPAGE)
1805 printf("pmap_pageable: PT page %x(%x) unmodified\n",
1806 sva, *(int *)pmap_pte(pmap, sva));
1807 if (pmapdebug & PDB_WIRING)
1808 pmap_check_wiring("pageable", sva);
1809#endif
1810 }
1811}
1812
1813/*
1814 * Clear the modify bits on the specified physical page.
1815 */
1816
1817void
1818pmap_clear_modify(pa)
1819 vm_offset_t pa;
1820{
1821#ifdef DEBUG
1822 if (pmapdebug & PDB_FOLLOW)
1823 printf("pmap_clear_modify(%x)\n", pa);
1824#endif
1825 pmap_changebit(pa, PG_M, FALSE);
1826}
1827
1828/*
1829 * pmap_clear_reference:
1830 *
1831 * Clear the reference bit on the specified physical page.
1832 */
1833
1834void pmap_clear_reference(pa)
1835 vm_offset_t pa;
1836{
1837#ifdef DEBUG
1838 if (pmapdebug & PDB_FOLLOW)
1839 printf("pmap_clear_reference(%x)\n", pa);
1840#endif
1841 pmap_changebit(pa, PG_U, FALSE);
1842}
1843
1844/*
1845 * pmap_is_referenced:
1846 *
1847 * Return whether or not the specified physical page is referenced
1848 * by any physical maps.
1849 */
1850
1851boolean_t
1852pmap_is_referenced(pa)
1853 vm_offset_t pa;
1854{
1855#ifdef DEBUG
1856 if (pmapdebug & PDB_FOLLOW) {
1857 boolean_t rv = pmap_testbit(pa, PG_U);
1858 printf("pmap_is_referenced(%x) -> %c\n", pa, "FT"[rv]);
1859 return(rv);
1860 }
1861#endif
1862 return(pmap_testbit(pa, PG_U));
1863}
1864
1865/*
1866 * pmap_is_modified:
1867 *
1868 * Return whether or not the specified physical page is modified
1869 * by any physical maps.
1870 */
1871
1872boolean_t
1873pmap_is_modified(pa)
1874 vm_offset_t pa;
1875{
1876#ifdef DEBUG
1877 if (pmapdebug & PDB_FOLLOW) {
1878 boolean_t rv = pmap_testbit(pa, PG_M);
1879 printf("pmap_is_modified(%x) -> %c\n", pa, "FT"[rv]);
1880 return(rv);
1881 }
1882#endif
1883 return(pmap_testbit(pa, PG_M));
1884}
1885
1886vm_offset_t
1887pmap_phys_address(ppn)
1888 int ppn;
1889{
1890 return(hp300_ptob(ppn));
1891}
1892
1893/*
1894 * Miscellaneous support routines follow
1895 */
1896
1897/* static */
1898boolean_t
1899pmap_testbit(pa, bit)
1900 register vm_offset_t pa;
1901 int bit;
1902{
1903 register pv_entry_t pv;
9acfa6cd 1904 register int *pte;
1905 int s;
1906
1907 if (pa < vm_first_phys || pa >= vm_last_phys)
1908 return(FALSE);
1909
1910 pv = pa_to_pvh(pa);
1911 s = splimp();
1912 /*
1913 * Check saved info first
1914 */
1915 if (pmap_attributes[pa_index(pa)] & bit) {
1916 splx(s);
1917 return(TRUE);
1918 }
1919 /*
1920 * Flush VAC to get correct state of any hardware maintained bits.
1921 */
1922 if (pmap_aliasmask && (bit & (PG_U|PG_M)))
1923 DCIS();
1924 /*
1925 * Not found, check current mappings returning
1926 * immediately if found.
1927 */
2059b854 1928 if (pv->pv_pmap != NULL) {
1929 for (; pv; pv = pv->pv_next) {
1930 pte = (int *) pmap_pte(pv->pv_pmap, pv->pv_va);
1931#if defined(DYNPGSIZE)
1932 {
1933 register int ix = 0;
1934
1935 do {
1936 if (*pte++ & bit) {
1937 splx(s);
1938 return(TRUE);
1939 }
1940 } while (++ix != hppagesperpage);
1941 }
1942#else
1943 if (*pte & bit) {
1944 splx(s);
1945 return(TRUE);
1946 }
1947#endif
1948 }
1949 }
1950 splx(s);
1951 return(FALSE);
1952}
1953
1954/* static */
1955pmap_changebit(pa, bit, setem)
1956 register vm_offset_t pa;
1957 int bit;
1958 boolean_t setem;
1959{
1960 register pv_entry_t pv;
9acfa6cd 1961 register int *pte, npte;
1962 vm_offset_t va;
1963 int s;
1964 boolean_t firstpage = TRUE;
1965
1966#ifdef DEBUG
1967 if (pmapdebug & PDB_BITS)
1968 printf("pmap_changebit(%x, %x, %s)\n",
1969 pa, bit, setem ? "set" : "clear");
1970#endif
1971 if (pa < vm_first_phys || pa >= vm_last_phys)
1972 return;
1973
1974 pv = pa_to_pvh(pa);
1975 s = splimp();
1976 /*
1977 * Clear saved attributes (modify, reference)
1978 */
1979 if (!setem)
1980 pmap_attributes[pa_index(pa)] &= ~bit;
1981#if defined(HP380)
1982 /*
1983 * If we are changing caching status or protection
1984 * make sure the caches are flushed.
1985 */
1986 if (mmutype == MMU_68040 &&
1987 (bit == PG_RO && setem || (bit & PG_CMASK))) {
1988 DCFP(pa);
1989 ICPP(pa);
1990 }
1991#endif
1992 /*
 1993 * Loop over all current mappings setting/clearing as appropriate
1994 * If setting RO do we need to clear the VAC?
1995 */
2059b854 1996 if (pv->pv_pmap != NULL) {
1997#ifdef DEBUG
1998 int toflush = 0;
1999#endif
2000 for (; pv; pv = pv->pv_next) {
2001#ifdef DEBUG
2002 toflush |= (pv->pv_pmap == kernel_pmap) ? 2 : 1;
2003#endif
2004 va = pv->pv_va;
2005
2006 /*
2007 * XXX don't write protect pager mappings
2008 */
2009 if (bit == PG_RO) {
2010 extern vm_offset_t pager_sva, pager_eva;
2011
2012 if (va >= pager_sva && va < pager_eva)
2013 continue;
2014 }
2015
2016 pte = (int *) pmap_pte(pv->pv_pmap, va);
2017 /*
2018 * Flush VAC to ensure we get correct state of HW bits
2019 * so we don't clobber them.
2020 */
2021 if (firstpage && pmap_aliasmask) {
2022 firstpage = FALSE;
2023 DCIS();
2024 }
2025#if defined(DYNPGSIZE)
2026 {
2027 register int ix = 0;
2028
2029 do {
2030 if (setem)
2031 npte = *pte | bit;
2032 else
2033 npte = *pte & ~bit;
2034 if (*pte != npte) {
2035 *pte = npte;
2036 TBIS(va);
2037 }
2038 va += HP_PAGE_SIZE;
2039 pte++;
2040 } while (++ix != hppagesperpage);
2041 }
2042#else
2043 if (setem)
2044 npte = *pte | bit;
2045 else
2046 npte = *pte & ~bit;
2047 if (*pte != npte) {
2048 *pte = npte;
2049 TBIS(va);
2050 }
2051#endif
2052 }
2053#ifdef DEBUG
2054 if (setem && bit == PG_RO && (pmapvacflush & PVF_PROTECT)) {
2055 if ((pmapvacflush & PVF_TOTAL) || toflush == 3)
2056 DCIA();
2057 else if (toflush == 2)
2058 DCIS();
2059 else
2060 DCIU();
2061 }
2062#endif
2063 }
2064 splx(s);
2065}
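/*
 * Illustrative sketch (not part of the original source): pmap_changebit()
 * is the workhorse for operations that set or clear a bit in every PTE
 * mapping a physical page.  Clearing the modify or reference attribute,
 * for example, would amount to:
 */
#ifdef notdef
void
example_clear_modify(pa)
	vm_offset_t pa;
{
	pmap_changebit(pa, PG_M, FALSE);	/* clear M in all mappings of pa */
}

void
example_clear_reference(pa)
	vm_offset_t pa;
{
	pmap_changebit(pa, PG_U, FALSE);	/* clear U in all mappings of pa */
}
#endif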
2066
2067/* static */
2068void
2069pmap_enter_ptpage(pmap, va)
2070 register pmap_t pmap;
2071 register vm_offset_t va;
2072{
2073 register vm_offset_t ptpa;
2074 register pv_entry_t pv;
2075 st_entry_t *ste;
2076 int s;
2077
2078#ifdef DEBUG
2079 if (pmapdebug & (PDB_FOLLOW|PDB_ENTER|PDB_PTPAGE))
2080 printf("pmap_enter_ptpage: pmap %x, va %x\n", pmap, va);
2081 enter_stats.ptpneeded++;
2082#endif
2083 /*
2084 * Allocate a segment table if necessary. Note that it is allocated
2085 * from kernel_map and not pt_map. This keeps user page tables
2086 * aligned on segment boundaries in the kernel address space.
2087 * The segment table is wired down. It will be freed whenever the
2088 * reference count drops to zero.
2089 */
2090 if (pmap->pm_stab == Segtabzero) {
2091 pmap->pm_stab = (st_entry_t *)
2092 kmem_alloc(kernel_map, HP_STSIZE);
2093 pmap->pm_stpa = (st_entry_t *)
2094 pmap_extract(kernel_pmap, (vm_offset_t)pmap->pm_stab);
2095#if defined(HP380)
2096 if (mmutype == MMU_68040) {
2097#ifdef DEBUG
2098 if (dowriteback && dokwriteback)
2099#endif
2100 pmap_changebit((vm_offset_t)pmap->pm_stab, PG_CCB, 0);
2101 pmap->pm_stfree = protostfree;
2102 }
2103#endif
2104 pmap->pm_stchanged = TRUE;
2105 /*
2106 * XXX may have changed segment table pointer for current
 2107	 * process, so update now to reload the hardware.
2108 */
2109 if (pmap == curproc->p_vmspace->vm_map.pmap)
2110 PMAP_ACTIVATE(pmap, (struct pcb *)curproc->p_addr, 1);
2111#ifdef DEBUG
2112 if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
2113 printf("enter: pmap %x stab %x(%x)\n",
2114 pmap, pmap->pm_stab, pmap->pm_stpa);
2115#endif
2116 }
2117
2118 ste = pmap_ste(pmap, va);
2119#if defined(HP380)
2120 /*
2121 * Allocate level 2 descriptor block if necessary
2122 */
2123 if (mmutype == MMU_68040) {
2124 if (!ste->sg_v) {
2125 int ix;
2126 caddr_t addr;
2127
2128 ix = bmtol2(pmap->pm_stfree);
2129 if (ix == -1)
2130 panic("enter: out of address space"); /* XXX */
2131 pmap->pm_stfree &= ~l2tobm(ix);
2132 addr = (caddr_t)&pmap->pm_stab[ix*SG4_LEV2SIZE];
2133 bzero(addr, SG4_LEV2SIZE*sizeof(st_entry_t));
2134 addr = (caddr_t)&pmap->pm_stpa[ix*SG4_LEV2SIZE];
2135 *(int *)ste = (u_int)addr | SG_RW | SG_U | SG_V;
2136#ifdef DEBUG
2137 if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
2138 printf("enter: alloc ste2 %d(%x)\n", ix, addr);
2139#endif
2140 }
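		/*
		 * Note (added for clarity; the exact macro definitions are
		 * an assumption): pm_stfree is a bitmap of free level 2
		 * descriptor blocks.  bmtol2() presumably yields the index
		 * of a set (free) bit, or -1 if none remain, and l2tobm()
		 * converts an index back to its bit, so the "&= ~l2tobm(ix)"
		 * above marks block `ix' as allocated.
		 */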
2141 ste = pmap_ste2(pmap, va);
2142 /*
2143 * Since a level 2 descriptor maps a block of SG4_LEV3SIZE
2144 * level 3 descriptors, we need a chunk of NPTEPG/SG4_LEV3SIZE
2145 * (16) such descriptors (NBPG/SG4_LEV3SIZE bytes) to map a
2146 * PT page--the unit of allocation. We set `ste' to point
2147 * to the first entry of that chunk which is validated in its
2148 * entirety below.
2149 */
2150 ste = (st_entry_t *)((int)ste & ~(NBPG/SG4_LEV3SIZE-1));
2151#ifdef DEBUG
2152 if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
2153 printf("enter: ste2 %x (%x)\n",
2154 pmap_ste2(pmap, va), ste);
2155#endif
2156 }
2157#endif
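	/*
	 * Worked example of the chunk arithmetic above, assuming the values
	 * implied by the code (NBPG = 4096, 4-byte descriptors, hence
	 * SG4_LEV3SIZE = 64): NPTEPG = NBPG / sizeof(pt_entry_t) = 1024, so
	 * a PT page needs NPTEPG/SG4_LEV3SIZE = 16 level 2 descriptors,
	 * occupying NBPG/SG4_LEV3SIZE = 64 bytes; masking with
	 * ~(NBPG/SG4_LEV3SIZE-1) therefore rounds `ste' down to the 64-byte
	 * boundary that starts the chunk.
	 */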
2158 va = trunc_page((vm_offset_t)pmap_pte(pmap, va));
2159
2160 /*
2161 * In the kernel we allocate a page from the kernel PT page
2162 * free list and map it into the kernel page table map (via
2163 * pmap_enter).
2164 */
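	/*
	 * (Descriptive note; the layout is inferred from the uses below,
	 * not from the original declaration:) a kpt_page entry is assumed
	 * to carry a free-list link plus the VA/PA of a preallocated kernel
	 * PT page, roughly
	 *	struct kpt_page { struct kpt_page *kpt_next;
	 *			  vm_offset_t kpt_va, kpt_pa; };
	 * so grabbing a page is a simple list pop.
	 */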
2165 if (pmap == kernel_pmap) {
2166 register struct kpt_page *kpt;
2167
2168 s = splimp();
2169 if ((kpt = kpt_free_list) == (struct kpt_page *)0) {
2170 /*
2171 * No PT pages available.
2172 * Try once to free up unused ones.
2173 */
2174#ifdef DEBUG
2175 if (pmapdebug & PDB_COLLECT)
2176 printf("enter: no KPT pages, collecting...\n");
2177#endif
2178 pmap_collect(kernel_pmap);
2179 if ((kpt = kpt_free_list) == (struct kpt_page *)0)
2180 panic("pmap_enter_ptpage: can't get KPT page");
2181 }
2182#ifdef DEBUG
2183 if (++kpt_stats.kptinuse > kpt_stats.kptmaxuse)
2184 kpt_stats.kptmaxuse = kpt_stats.kptinuse;
2185#endif
2186 kpt_free_list = kpt->kpt_next;
2187 kpt->kpt_next = kpt_used_list;
2188 kpt_used_list = kpt;
2189 ptpa = kpt->kpt_pa;
2190 bzero(kpt->kpt_va, HP_PAGE_SIZE);
2191 pmap_enter(pmap, va, ptpa, VM_PROT_DEFAULT, TRUE);
2192#ifdef DEBUG
2193 if (pmapdebug & (PDB_ENTER|PDB_PTPAGE)) {
2194 int ix = pmap_ste(pmap, va) - pmap_ste(pmap, 0);
2195
 2196		printf("enter: add &Sysptmap[%d]: %x (KPT page %x)\n",
2197 ix, *(int *)&Sysptmap[ix], kpt->kpt_va);
2198 }
2199#endif
2200 splx(s);
2201 }
2202 /*
 2203	 * For user processes we just simulate a fault on that location,
2204 * letting the VM system allocate a zero-filled page.
2205 */
2206 else {
2207#ifdef DEBUG
2208 if (pmapdebug & (PDB_ENTER|PDB_PTPAGE))
2209 printf("enter: about to fault UPT pg at %x\n", va);
2210 s = vm_fault(pt_map, va, VM_PROT_READ|VM_PROT_WRITE, FALSE);
2211 if (s != KERN_SUCCESS) {
2212 printf("vm_fault(pt_map, %x, RW, 0) -> %d\n", va, s);
2213 panic("pmap_enter: vm_fault failed");
2214 }
2215#else
2216 if (vm_fault(pt_map, va, VM_PROT_READ|VM_PROT_WRITE, FALSE)
2217 != KERN_SUCCESS)
2218 panic("pmap_enter: vm_fault failed");
 2219#endif
2220 ptpa = pmap_extract(kernel_pmap, va);
2221#ifdef DEBUG
2222 PHYS_TO_VM_PAGE(ptpa)->ptpage = TRUE;
2223#endif
2224 }
2225#if defined(HP380)
2226 /*
 2227	 * Turn off copyback caching of page table pages;
 2228	 * things could get ugly otherwise.
2229 */
2230#ifdef DEBUG
2231 if (dowriteback && dokwriteback)
2232#endif
2233 if (mmutype == MMU_68040) {
2234 int *pte = (int *)pmap_pte(kernel_pmap, va);
2235#ifdef DEBUG
2236 if ((pmapdebug & PDB_PARANOIA) && (*pte & PG_CCB) == 0)
2237 printf("%s PT no CCB: kva=%x ptpa=%x pte@%x=%x\n",
2238 pmap == kernel_pmap ? "Kernel" : "User",
2239 va, ptpa, pte, *pte);
2240#endif
2241 pmap_changebit(ptpa, PG_CCB, 0);
2242 }
2243#endif
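	/*
	 * Background note (added; rationale is an inference): 68040 table
	 * walks and U/M-bit updates go to physical memory rather than the
	 * data cache, so a PT page left in copyback mode could leave the MMU
	 * and the kernel looking at different copies of a PTE.  Clearing
	 * PG_CCB forces write-through behavior for these pages.
	 */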
2244 /*
2245 * Locate the PV entry in the kernel for this PT page and
2246 * record the STE address. This is so that we can invalidate
2247 * the STE when we remove the mapping for the page.
2248 */
2249 pv = pa_to_pvh(ptpa);
2250 s = splimp();
2251 if (pv) {
2252 pv->pv_flags |= PV_PTPAGE;
2253 do {
2254 if (pv->pv_pmap == kernel_pmap && pv->pv_va == va)
2255 break;
2256 } while (pv = pv->pv_next);
2257 }
2258#ifdef DEBUG
 2259	if (pv == NULL)
2260 panic("pmap_enter_ptpage: PT page not entered");
2261#endif
2262 pv->pv_ptste = ste;
2263 pv->pv_ptpmap = pmap;
2264#ifdef DEBUG
2265 if (pmapdebug & (PDB_ENTER|PDB_PTPAGE))
2266 printf("enter: new PT page at PA %x, ste at %x\n", ptpa, ste);
2267#endif
2268
2269 /*
2270 * Map the new PT page into the segment table.
2271 * Also increment the reference count on the segment table if this
2272 * was a user page table page. Note that we don't use vm_map_pageable
 2273	 * to keep the count like we do for PT pages; this is mostly because
2274 * it would be difficult to identify ST pages in pmap_pageable to
2275 * release them. We also avoid the overhead of vm_map_pageable.
2276 */
2277#if defined(HP380)
2278 if (mmutype == MMU_68040) {
2279 st_entry_t *este;
2280
2281 for (este = &ste[NPTEPG/SG4_LEV3SIZE]; ste < este; ste++) {
2282 *(int *)ste = ptpa | SG_U | SG_RW | SG_V;
2283 ptpa += SG4_LEV3SIZE * sizeof(st_entry_t);
2284 }
2285 } else
2286#endif
2287 *(int *)ste = (ptpa & SG_FRAME) | SG_RW | SG_V;
2288 if (pmap != kernel_pmap) {
2289 pmap->pm_sref++;
2290#ifdef DEBUG
2291 if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
2292 printf("enter: stab %x refcnt %d\n",
2293 pmap->pm_stab, pmap->pm_sref);
2294#endif
2295 }
2296 /*
2297 * Flush stale TLB info.
2298 */
2299 if (pmap == kernel_pmap)
2300 TBIAS();
2301 else
2302 TBIAU();
2303 pmap->pm_ptpages++;
2304 splx(s);
2305}
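/*
 * Illustrative sketch (not part of the original source): pmap_enter_ptpage()
 * is reached from pmap_enter() when no valid segment table entry exists for
 * the target VA, i.e. roughly:
 *
 *	if (!pmap_ste_v(pmap, va))
 *		pmap_enter_ptpage(pmap, va);
 */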
2306
2307#ifdef DEBUG
2308pmap_pvdump(pa)
2309 vm_offset_t pa;
2310{
2311 register pv_entry_t pv;
2312
2313 printf("pa %x", pa);
2314 for (pv = pa_to_pvh(pa); pv; pv = pv->pv_next)
2315 printf(" -> pmap %x, va %x, ptste %x, ptpmap %x, flags %x",
2316 pv->pv_pmap, pv->pv_va, pv->pv_ptste, pv->pv_ptpmap,
2317 pv->pv_flags);
2318 printf("\n");
2319}
2320
2321pmap_check_wiring(str, va)
2322 char *str;
2323 vm_offset_t va;
2324{
2325 vm_map_entry_t entry;
2326 register int count, *pte;
2327
2328 va = trunc_page(va);
 2329	if (!pmap_ste_v(kernel_pmap, va) ||
2330 !pmap_pte_v(pmap_pte(kernel_pmap, va)))
2331 return;
2332
2333 if (!vm_map_lookup_entry(pt_map, va, &entry)) {
2334 printf("wired_check: entry for %x not found\n", va);
2335 return;
2336 }
2337 count = 0;
2338 for (pte = (int *)va; pte < (int *)(va+PAGE_SIZE); pte++)
2339 if (*pte)
2340 count++;
2341 if (entry->wired_count != count)
2342 printf("*%s*: %x: w%d/a%d\n",
2343 str, va, entry->wired_count, count);
2344}
2345#endif
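/*
 * Note (usage described here is an assumption, not shown in this section):
 * pmap_pvdump() and pmap_check_wiring() are DEBUG-only aids; the former
 * dumps the PV list for a physical page, the latter cross-checks the number
 * of valid PTEs in a kernel-mapped PT page against the wired count recorded
 * in the corresponding pt_map entry.
 */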