+ /*
+ * NOTE(review): hunk begins mid-function (a.out/COFF exec image
+ * setup). "paged" presumably selects a demand-paged image (else
+ * branch) versus one read in eagerly here -- TODO confirm against
+ * the enclosing function, which is outside this hunk.
+ */
+ if (paged == 0) {
+ /*
+ * Read in data segment.
+ */
+ /*
+ * NOTE(review): the data-segment read's return value is
+ * explicitly discarded ((void) cast) while the text-segment
+ * read below records it in "error" -- confirm this asymmetry
+ * is intentional.
+ */
+ (void) vn_rdwr(UIO_READ, vp, vm->vm_daddr, (int) ep->a_data,
+ (off_t)(toff + ep->a_text), UIO_USERSPACE,
+ (IO_UNIT|IO_NODELOCKED), cred, (int *)0, p);
+ /*
+ * Read in text segment if necessary (0410),
+ * and read-protect it.
+ */
+ if (ep->a_text > 0) {
+ error = vn_rdwr(UIO_READ, vp, vm->vm_taddr,
+ (int)ep->a_text, toff, UIO_USERSPACE,
+ (IO_UNIT|IO_NODELOCKED), cred, (int *)0, p);
+ /*
+ * NOTE(review): trunc_page() protects only whole pages of
+ * text; a final partial page remains writable -- verify this
+ * is the intended behaviour.
+ */
+ (void) vm_map_protect(&vm->vm_map,
+ (vm_offset_t)vm->vm_taddr,
+ (vm_offset_t)vm->vm_taddr + trunc_page(ep->a_text),
+ VM_PROT_READ|VM_PROT_EXECUTE, FALSE);
+ }
+ } else {
+ /*
+ * Allocate a region backed by the exec'ed vnode.
+ */
+#ifndef COFF
+ addr = VM_MIN_ADDRESS;
+ size = round_page(xts + ep->a_data);
+ error = vm_mmap(&vm->vm_map, &addr, size, VM_PROT_ALL,
+ MAP_FILE|MAP_COPY|MAP_FIXED,
+ (caddr_t)vp, (vm_offset_t)toff);
+ /*
+ * Restrict the text portion (first xts bytes) of the single
+ * mapping to read/execute.
+ */
+ (void) vm_map_protect(&vm->vm_map, addr, addr + xts,
+ VM_PROT_READ|VM_PROT_EXECUTE, FALSE);
+#else /* COFF */
+ /* COFF: map text and data as two separate regions. */
+ addr = (vm_offset_t)vm->vm_taddr;
+ size = xts;
+ /*
+ * NOTE(review): "error" from this text mapping is overwritten
+ * unchecked by the data mapping below, so a text-mapping
+ * failure can be masked if the data mapping succeeds --
+ * confirm intentional.
+ */
+ error = vm_mmap(&vm->vm_map, &addr, size,
+ VM_PROT_READ|VM_PROT_EXECUTE,
+ MAP_FILE|MAP_COPY|MAP_FIXED,
+ (caddr_t)vp, (vm_offset_t)toff);
+ /*
+ * Advance the file offset past the text so the data segment
+ * is mapped from the correct position in the file.
+ */
+ toff += size;
+ addr = (vm_offset_t)vm->vm_daddr;
+ size = round_page(ep->a_data);
+ error = vm_mmap(&vm->vm_map, &addr, size, VM_PROT_ALL,
+ MAP_FILE|MAP_COPY|MAP_FIXED,
+ (caddr_t)vp, (vm_offset_t)toff);
+#endif /* COFF */
+ /*
+ * NOTE(review): VTEXT presumably marks the vnode as an active
+ * text image -- confirm its semantics against the vnode layer.
+ */
+ vp->v_flag |= VTEXT;
+ }
+ if (error) {
+ /*
+ * NOTE(review): "badmap" appears to be a goto target from code
+ * earlier in the function, outside this hunk -- confirm.
+ * On failure the process is killed rather than returned to,
+ * since its old address space is already gone.
+ */
badmap:
+ printf("pid %d: VM allocation failure\n", p->p_pid);
+ uprintf("sorry, pid %d was killed in exec: VM allocation\n",
+ p->p_pid);
+ psignal(p, SIGKILL);
+ p->p_flag |= SKEEP;
+ return(error);
+ }