BSD 4.4-Lite1 release
[unix-history] / usr / src / sys / pmax / pmax / pmap.c
index 97fa304..15e27de 100644 (file)
@@ -34,7 +34,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- *     @(#)pmap.c      8.1 (Berkeley) 6/10/93
+ *     @(#)pmap.c      8.4 (Berkeley) 1/26/94
  */
 
 /*
  */
 
 /*
@@ -69,6 +69,9 @@
 #include <sys/malloc.h>
 #include <sys/user.h>
 #include <sys/buf.h>
 #include <sys/malloc.h>
 #include <sys/user.h>
 #include <sys/buf.h>
+#ifdef SYSVSHM
+#include <sys/shm.h>
+#endif
 
 #include <vm/vm_kern.h>
 #include <vm/vm_page.h>
 
 #include <vm/vm_kern.h>
 #include <vm/vm_page.h>
@@ -173,12 +176,15 @@ pmap_bootstrap(firstaddr)
            (name) = (type *)firstaddr; firstaddr = (vm_offset_t)((name)+(num))
        /*
         * Allocate a PTE table for the kernel.
            (name) = (type *)firstaddr; firstaddr = (vm_offset_t)((name)+(num))
        /*
         * Allocate a PTE table for the kernel.
-        * The first '256' comes from PAGER_MAP_SIZE in vm_pager_init().
+        * The '1024' comes from PAGER_MAP_SIZE in vm_pager_init().
         * This should be kept in sync.
         * We also reserve space for kmem_alloc_pageable() for vm_fork().
         */
        Sysmapsize = (VM_KMEM_SIZE + VM_MBUF_SIZE + VM_PHYS_SIZE +
         * This should be kept in sync.
         * We also reserve space for kmem_alloc_pageable() for vm_fork().
         */
        Sysmapsize = (VM_KMEM_SIZE + VM_MBUF_SIZE + VM_PHYS_SIZE +
-               nbuf * MAXBSIZE + 16 * NCARGS) / NBPG + 256 + 256;
+               nbuf * MAXBSIZE + 16 * NCARGS) / NBPG + 1024 + 256;
+#ifdef SYSVSHM
+       Sysmapsize += shminfo.shmall;
+#endif
        valloc(Sysmap, pt_entry_t, Sysmapsize);
 #ifdef ATTR
        valloc(pmap_attributes, char, physmem);
        valloc(Sysmap, pt_entry_t, Sysmapsize);
 #ifdef ATTR
        valloc(pmap_attributes, char, physmem);
@@ -1293,7 +1299,7 @@ pmap_phys_address(ppn)
  * Therefore, when we allocate a new PID, we just take the next number. When
  * we run out of numbers, we flush the TLB, increment the generation count
  * and start over. PID zero is reserved for kernel use.
  * Therefore, when we allocate a new PID, we just take the next number. When
  * we run out of numbers, we flush the TLB, increment the generation count
  * and start over. PID zero is reserved for kernel use.
- * This is called only by swtch().
+ * This is called only by switch().
  */
 int
 pmap_alloc_tlbpid(p)
  */
 int
 pmap_alloc_tlbpid(p)
@@ -1403,13 +1409,14 @@ vm_page_alloc1()
 
        spl = splimp();                         /* XXX */
        simple_lock(&vm_page_queue_free_lock);
 
        spl = splimp();                         /* XXX */
        simple_lock(&vm_page_queue_free_lock);
-       if (queue_empty(&vm_page_queue_free)) {
+       if (vm_page_queue_free.tqh_first == NULL) {
                simple_unlock(&vm_page_queue_free_lock);
                splx(spl);
                return (NULL);
        }
 
                simple_unlock(&vm_page_queue_free_lock);
                splx(spl);
                return (NULL);
        }
 
-       queue_remove_first(&vm_page_queue_free, mem, vm_page_t, pageq);
+       mem = vm_page_queue_free.tqh_first;
+       TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
 
        cnt.v_free_count--;
        simple_unlock(&vm_page_queue_free_lock);
 
        cnt.v_free_count--;
        simple_unlock(&vm_page_queue_free_lock);
@@ -1450,13 +1457,13 @@ vm_page_free1(mem)
 {
 
        if (mem->flags & PG_ACTIVE) {
 {
 
        if (mem->flags & PG_ACTIVE) {
-               queue_remove(&vm_page_queue_active, mem, vm_page_t, pageq);
+               TAILQ_REMOVE(&vm_page_queue_active, mem, pageq);
                mem->flags &= ~PG_ACTIVE;
                cnt.v_active_count--;
        }
 
        if (mem->flags & PG_INACTIVE) {
                mem->flags &= ~PG_ACTIVE;
                cnt.v_active_count--;
        }
 
        if (mem->flags & PG_INACTIVE) {
-               queue_remove(&vm_page_queue_inactive, mem, vm_page_t, pageq);
+               TAILQ_REMOVE(&vm_page_queue_inactive, mem, pageq);
                mem->flags &= ~PG_INACTIVE;
                cnt.v_inactive_count--;
        }
                mem->flags &= ~PG_INACTIVE;
                cnt.v_inactive_count--;
        }
@@ -1466,7 +1473,7 @@ vm_page_free1(mem)
 
                spl = splimp();
                simple_lock(&vm_page_queue_free_lock);
 
                spl = splimp();
                simple_lock(&vm_page_queue_free_lock);
-               queue_enter(&vm_page_queue_free, mem, vm_page_t, pageq);
+               TAILQ_INSERT_TAIL(&vm_page_queue_free, mem, pageq);
 
                cnt.v_free_count++;
                simple_unlock(&vm_page_queue_free_lock);
 
                cnt.v_free_count++;
                simple_unlock(&vm_page_queue_free_lock);