+#ifdef ULTRIXCOMPAT
+ /*
+ * Always start out as an ULTRIX process.
+ * A system call in crt0.o will change us to BSD system calls later.
+ */
+ p->p_md.md_flags |= MDP_ULTRIX;
+#endif
+ /* Record that this process has successfully begun an exec. */
+ p->p_flag |= SEXEC;
+#ifndef COFF
+ /*
+ * a.out layout: text and data are contiguous, so reserve a single
+ * zero-filled region for both at the bottom of the user address
+ * space.  (xts is presumably the page-rounded text size and ds the
+ * data size in clicks -- defined earlier in this function; confirm.)
+ */
+ addr = VM_MIN_ADDRESS;
+ if (vm_allocate(&vm->vm_map, &addr, xts + ctob(ds), FALSE)) {
+ uprintf("Cannot allocate text+data space\n");
+ error = ENOMEM; /* XXX */
+ goto badmap;
+ }
+ vm->vm_taddr = (caddr_t)VM_MIN_ADDRESS;
+ vm->vm_daddr = (caddr_t)(VM_MIN_ADDRESS + xts);
+#else /* COFF */
+ /*
+ * COFF places text and data at addresses taken from the file
+ * header, so each segment gets its own allocation.
+ */
+ addr = (vm_offset_t)ep->ex_aout.codeStart;
+ vm->vm_taddr = (caddr_t)addr;
+ if (vm_allocate(&vm->vm_map, &addr, xts, FALSE)) {
+ uprintf("Cannot allocate text space\n");
+ error = ENOMEM; /* XXX */
+ goto badmap;
+ }
+ addr = (vm_offset_t)ep->ex_aout.heapStart;
+ vm->vm_daddr = (caddr_t)addr;
+ if (vm_allocate(&vm->vm_map, &addr, round_page(ctob(ds)), FALSE)) {
+ uprintf("Cannot allocate data space\n");
+ error = ENOMEM; /* XXX */
+ goto badmap;
+ }
+#endif /* COFF */
+ /*
+ * Reserve the maximum possible stack region just below USRSTACK,
+ * then revoke access to the part beyond the current RLIMIT_STACK
+ * soft limit; the stack grows down into the protected portion only
+ * after the limit is raised.
+ */
+ size = round_page(MAXSSIZ); /* XXX */
+#ifdef i386
+ /* NOTE(review): extra NBPG gap below the stack -- i386-specific;
+ * presumably room for a red zone or signal trampoline page. Verify. */
+ addr = trunc_page(USRSTACK - size) - NBPG; /* XXX */
+#else
+ addr = trunc_page(USRSTACK - size);
+#endif
+ if (vm_allocate(&vm->vm_map, &addr, size, FALSE)) {
+ uprintf("Cannot allocate stack space\n");
+ error = ENOMEM; /* XXX */
+ goto badmap;
+ }
+ /* [addr, addr+size) is now the low, currently-unusable slice of the
+ * stack region: everything below the rlimit-sized active stack. */
+ size -= round_page(p->p_rlimit[RLIMIT_STACK].rlim_cur);
+ if (vm_map_protect(&vm->vm_map, addr, addr+size, VM_PROT_NONE, FALSE)) {
+ uprintf("Cannot protect stack space\n");
+ error = ENOMEM;
+ goto badmap;
+ }
+ vm->vm_maxsaddr = (caddr_t)addr;
+
+ if (paged == 0) {
+ /*
+ * Read in data segment.
+ * NOTE(review): the vn_rdwr() return value is discarded here,
+ * while the text-segment read below checks it -- a short or
+ * failed data read goes undetected. Looks like a latent bug;
+ * confirm before changing, as the error path kills the process.
+ */
+ (void) vn_rdwr(UIO_READ, vp, vm->vm_daddr, (int) ep->a_data,
+ (off_t)(toff + ep->a_text), UIO_USERSPACE,
+ (IO_UNIT|IO_NODELOCKED), cred, (int *)0, p);
+ /*
+ * Read in text segment if necessary (0410),
+ * and read-protect it.
+ */
+ if (ep->a_text > 0) {
+ error = vn_rdwr(UIO_READ, vp, vm->vm_taddr,
+ (int)ep->a_text, toff, UIO_USERSPACE,
+ (IO_UNIT|IO_NODELOCKED), cred, (int *)0, p);
+ /* NOTE(review): trunc_page() leaves any partial final text
+ * page writable; round_page() would protect the whole
+ * segment. Later BSD sources use round_page here -- verify
+ * against this tree's page-alignment assumptions. */
+ (void) vm_map_protect(&vm->vm_map,
+ (vm_offset_t)vm->vm_taddr,
+ (vm_offset_t)vm->vm_taddr + trunc_page(ep->a_text),
+ VM_PROT_READ|VM_PROT_EXECUTE, FALSE);
+ }
+ } else {
+ /*
+ * Allocate a region backed by the exec'ed vnode.
+ * (Demand paging: MAP_FILE|MAP_COPY|MAP_FIXED maps the image
+ * copy-on-write at the fixed addresses chosen above.)
+ */
+#ifndef COFF
+ addr = VM_MIN_ADDRESS;
+ size = round_page(xts + ep->a_data);
+ error = vm_mmap(&vm->vm_map, &addr, size, VM_PROT_ALL,
+ MAP_FILE|MAP_COPY|MAP_FIXED,
+ (caddr_t)vp, (vm_offset_t)toff);
+ /* Make the text portion of the single mapping read/execute only. */
+ (void) vm_map_protect(&vm->vm_map, addr, addr + xts,
+ VM_PROT_READ|VM_PROT_EXECUTE, FALSE);
+#else /* COFF */
+ /* Map text and data separately at their header-specified bases.
+ * NOTE(review): the text vm_mmap() error is overwritten by the
+ * data vm_mmap() below -- a text mapping failure followed by a
+ * successful data mapping would go unreported. Verify. */
+ addr = (vm_offset_t)vm->vm_taddr;
+ size = xts;
+ error = vm_mmap(&vm->vm_map, &addr, size,
+ VM_PROT_READ|VM_PROT_EXECUTE,
+ MAP_FILE|MAP_COPY|MAP_FIXED,
+ (caddr_t)vp, (vm_offset_t)toff);
+ toff += size;
+ addr = (vm_offset_t)vm->vm_daddr;
+ size = round_page(ep->a_data);
+ error = vm_mmap(&vm->vm_map, &addr, size, VM_PROT_ALL,
+ MAP_FILE|MAP_COPY|MAP_FIXED,
+ (caddr_t)vp, (vm_offset_t)toff);
+#endif /* COFF */
+ /* Mark the vnode as an active text image (disallows writes to it). */
+ vp->v_flag |= VTEXT;
+ }
+ if (error) {
+badmap:
+ /*
+ * VM setup failed partway through; the old image is already
+ * gone, so the process cannot be resumed -- kill it.  SKEEP
+ * presumably pins the process in core while it dies; confirm.
+ */
+ printf("pid %d: VM allocation failure\n", p->p_pid);
+ uprintf("sorry, pid %d was killed in exec: VM allocation\n",
+ p->p_pid);
+ psignal(p, SIGKILL);
+ p->p_flag |= SKEEP;
+ return(error);
+ }