X-Git-Url: https://git.subgeniuskitty.com/unix-history/.git/blobdiff_plain/ad7871609881e73855d0b04da49b486cd93efca7..ed554bc5e4201344d7eaad78263566e79428759c:/usr/src/sys/pmax/pmax/pmap.c

diff --git a/usr/src/sys/pmax/pmax/pmap.c b/usr/src/sys/pmax/pmax/pmap.c
index 97fa3045a1..15e27dec9a 100644
--- a/usr/src/sys/pmax/pmax/pmap.c
+++ b/usr/src/sys/pmax/pmax/pmap.c
@@ -34,7 +34,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- *	@(#)pmap.c	8.1 (Berkeley) 6/10/93
+ *	@(#)pmap.c	8.4 (Berkeley) 1/26/94
  */
 
 /*
@@ -69,6 +69,9 @@
 #include
 #include
 #include
+#ifdef SYSVSHM
+#include <sys/shm.h>
+#endif
 
 #include
 #include
@@ -173,12 +176,15 @@ pmap_bootstrap(firstaddr)
 	(name) = (type *)firstaddr; firstaddr = (vm_offset_t)((name)+(num))
 	/*
 	 * Allocate a PTE table for the kernel.
-	 * The first '256' comes from PAGER_MAP_SIZE in vm_pager_init().
+	 * The '1024' comes from PAGER_MAP_SIZE in vm_pager_init().
 	 * This should be kept in sync.
 	 * We also reserve space for kmem_alloc_pageable() for vm_fork().
 	 */
 	Sysmapsize = (VM_KMEM_SIZE + VM_MBUF_SIZE + VM_PHYS_SIZE +
-		nbuf * MAXBSIZE + 16 * NCARGS) / NBPG + 256 + 256;
+		nbuf * MAXBSIZE + 16 * NCARGS) / NBPG + 1024 + 256;
+#ifdef SYSVSHM
+	Sysmapsize += shminfo.shmall;
+#endif
 	valloc(Sysmap, pt_entry_t, Sysmapsize);
 #ifdef ATTR
 	valloc(pmap_attributes, char, physmem);
@@ -1293,7 +1299,7 @@ pmap_phys_address(ppn)
  * Therefore, when we allocate a new PID, we just take the next number. When
  * we run out of numbers, we flush the TLB, increment the generation count
  * and start over. PID zero is reserved for kernel use.
- * This is called only by swtch().
+ * This is called only by switch().
  */
 int
 pmap_alloc_tlbpid(p)
@@ -1403,13 +1409,14 @@ vm_page_alloc1()
 
 	spl = splimp();				/* XXX */
 	simple_lock(&vm_page_queue_free_lock);
-	if (queue_empty(&vm_page_queue_free)) {
+	if (vm_page_queue_free.tqh_first == NULL) {
 		simple_unlock(&vm_page_queue_free_lock);
 		splx(spl);
 		return (NULL);
 	}
 
-	queue_remove_first(&vm_page_queue_free, mem, vm_page_t, pageq);
+	mem = vm_page_queue_free.tqh_first;
+	TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
 
 	cnt.v_free_count--;
 	simple_unlock(&vm_page_queue_free_lock);
@@ -1450,13 +1457,13 @@ vm_page_free1(mem)
 {
 
 	if (mem->flags & PG_ACTIVE) {
-		queue_remove(&vm_page_queue_active, mem, vm_page_t, pageq);
+		TAILQ_REMOVE(&vm_page_queue_active, mem, pageq);
 		mem->flags &= ~PG_ACTIVE;
 		cnt.v_active_count--;
 	}
 
 	if (mem->flags & PG_INACTIVE) {
-		queue_remove(&vm_page_queue_inactive, mem, vm_page_t, pageq);
+		TAILQ_REMOVE(&vm_page_queue_inactive, mem, pageq);
 		mem->flags &= ~PG_INACTIVE;
 		cnt.v_inactive_count--;
 	}
@@ -1466,7 +1473,7 @@ vm_page_free1(mem)
 
 	spl = splimp();
 	simple_lock(&vm_page_queue_free_lock);
-	queue_enter(&vm_page_queue_free, mem, vm_page_t, pageq);
+	TAILQ_INSERT_TAIL(&vm_page_queue_free, mem, pageq);
 
 	cnt.v_free_count++;
 	simple_unlock(&vm_page_queue_free_lock);
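
The pmap_bootstrap() hunk above resizes the kernel PTE table: the pager-map allowance grows from 256 to 1024 pages to stay in sync with PAGER_MAP_SIZE, and when the kernel is built with SYSVSHM the system-wide shared-memory page limit (shminfo.shmall) is added so every System V shared-memory page can be mapped. A minimal userland sketch of that sizing arithmetic follows; the constant values and variable names are hypothetical stand-ins, not the kernel's real configuration.

#include <stdio.h>

/* Hypothetical stand-ins for the kernel constants used in pmap_bootstrap(). */
#define NBPG		4096			/* hardware page size */
#define VM_KMEM_SIZE	(4 * 1024 * 1024)	/* kernel malloc arena */
#define VM_MBUF_SIZE	(1 * 1024 * 1024)	/* mbuf map */
#define VM_PHYS_SIZE	(1 * 1024 * 1024)	/* phys map */
#define MAXBSIZE	(64 * 1024)		/* largest buffer-cache buffer */
#define NCARGS		(20 * 1024)		/* exec argument space */

int
main(void)
{
	int nbuf = 128;		/* buffer-cache buffers, normally sized at boot */
	long shmall = 1024;	/* shminfo.shmall: max shared-memory pages */
	int sysvshm = 1;	/* compiled with "options SYSVSHM"? */

	/*
	 * Same shape as the new computation: one PTE per kernel virtual page,
	 * 1024 pages for the pager map plus 256 for kmem_alloc_pageable().
	 */
	long sysmapsize = (VM_KMEM_SIZE + VM_MBUF_SIZE + VM_PHYS_SIZE +
	    (long)nbuf * MAXBSIZE + 16L * NCARGS) / NBPG + 1024 + 256;
	if (sysvshm)
		sysmapsize += shmall;	/* room to map every shared-memory page */

	printf("Sysmap entries: %ld (%ld KB of PTEs at 4 bytes each)\n",
	    sysmapsize, sysmapsize * 4 / 1024);
	return (0);
}

Growing the pager allowance without also updating PAGER_MAP_SIZE (or the reverse) would leave Sysmap under- or over-sized, which is why the comment insists the two be kept in sync.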
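
The pmap_alloc_tlbpid() comment documents the MIPS address-space ID scheme: PIDs are handed out sequentially, PID zero is reserved for the kernel, and when the supply runs out the whole TLB is flushed, a generation counter is bumped, and numbering starts over. The sketch below illustrates that scheme only; the names, the 64-PID limit, and the flush routine are assumptions for illustration, not the 4.4BSD code.

#include <stdio.h>

#define NUM_TLB_PIDS	64	/* hypothetical number of hardware TLB PIDs */

struct fake_pmap {
	int pm_tlbpid;		/* assigned TLB PID, -1 if none */
	unsigned pm_tlbgen;	/* generation it was assigned in */
};

static unsigned tlb_generation = 1;
static int next_tlbpid = 1;	/* PID zero reserved for the kernel */

static void
tlb_flush_all(void)
{
	/* Stand-in for the hardware "flush every TLB entry" routine. */
	printf("  [TLB flushed, generation now %u]\n", tlb_generation + 1);
}

/* Give the pmap a TLB PID that is valid for the current generation. */
static int
alloc_tlbpid(struct fake_pmap *pm)
{
	if (pm->pm_tlbpid >= 0 && pm->pm_tlbgen == tlb_generation)
		return (pm->pm_tlbpid);		/* still valid, reuse it */

	if (next_tlbpid >= NUM_TLB_PIDS) {
		/* Out of PIDs: flush stale translations and start over. */
		tlb_flush_all();
		tlb_generation++;
		next_tlbpid = 1;
	}
	pm->pm_tlbpid = next_tlbpid++;
	pm->pm_tlbgen = tlb_generation;
	return (pm->pm_tlbpid);
}

int
main(void)
{
	struct fake_pmap pmaps[200];
	int i;

	for (i = 0; i < 200; i++) {
		pmaps[i].pm_tlbpid = -1;
		pmaps[i].pm_tlbgen = 0;
	}
	for (i = 0; i < 200; i++)
		printf("process %3d -> TLB PID %d (gen %u)\n",
		    i, alloc_tlbpid(&pmaps[i]), tlb_generation);
	return (0);
}

The generation counter lets a process keep its PID for as long as the assignment is from the current generation; after a flush, a stale PID is simply reassigned the next time the process is switched in.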
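
The vm_page_alloc1() and vm_page_free1() hunks replace the old 4.3BSD-style queue_*() macros with the <sys/queue.h> tail-queue macros used by the 4.4BSD VM page queues: emptiness is tested by looking at tqh_first, the head page is taken off with TAILQ_REMOVE(), and freed pages go back on the end with TAILQ_INSERT_TAIL(). A small self-contained sketch of the same idiom on a made-up free-page list (the struct and function names are hypothetical):

#include <sys/queue.h>
#include <stdio.h>

struct fake_page {
	int pg_index;				/* which "page" this is */
	TAILQ_ENTRY(fake_page) pageq;		/* linkage, as in struct vm_page */
};

TAILQ_HEAD(pglist, fake_page) free_queue = TAILQ_HEAD_INITIALIZER(free_queue);
static int free_count;

/* Grab the first free page, as vm_page_alloc1() does after the conversion. */
static struct fake_page *
page_alloc(void)
{
	struct fake_page *mem;

	if (free_queue.tqh_first == NULL)	/* old queue_empty() test */
		return (NULL);
	mem = free_queue.tqh_first;
	TAILQ_REMOVE(&free_queue, mem, pageq);
	free_count--;
	return (mem);
}

/* Put a page back on the tail, as vm_page_free1() does. */
static void
page_free(struct fake_page *mem)
{
	TAILQ_INSERT_TAIL(&free_queue, mem, pageq);
	free_count++;
}

int
main(void)
{
	struct fake_page pages[4];
	struct fake_page *p;
	int i;

	for (i = 0; i < 4; i++) {
		pages[i].pg_index = i;
		page_free(&pages[i]);
	}
	while ((p = page_alloc()) != NULL)
		printf("allocated page %d, %d left\n", p->pg_index, free_count);
	return (0);
}

Unlike the old queue_remove()/queue_enter() macros, TAILQ_REMOVE() and TAILQ_INSERT_TAIL() take the name of the linkage field rather than the element type, which is why the vm_page_t argument disappears from the converted calls.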