+ return rv;
+}
+
+int
+pwrite (struct proc *procp, unsigned int addr, unsigned int datum) {
+ int rv;
+ vm_map_t map, tmap;
+ vm_object_t object;
+ vm_offset_t kva = 0; /* kernel va of the temporary mapping */
+ int page_offset; /* offset into page */
+ vm_offset_t pageno; /* page number */
+ vm_map_entry_t out_entry;
+ vm_prot_t out_prot;
+ boolean_t wired, single_use;
+ vm_offset_t off;
+ boolean_t fix_prot = 0;
+
+ /*
+  * Map the page into kernel space: locate the backing object in
+  * the process's map, map it into kernel_map, and write through
+  * that mapping.
+  */
+
+ map = &procp->p_vmspace->vm_map;
+
+ page_offset = addr - trunc_page(addr);
+ pageno = trunc_page(addr);
+
+ /*
+ * Check the permissions for the area we're interested in.
+ */
+
+ if (vm_map_check_protection (map, pageno, pageno + PAGE_SIZE,
+ VM_PROT_WRITE) == FALSE) {
+ /*
+ * If the page was not writable, we make it so.
+ * XXX It is possible a page may *not* be read/executable,
+ * if a process changes that!
+ */
+ fix_prot = 1;
+ /* The page isn't writable, so let's try making it so... */
+ if ((rv = vm_map_protect (map, pageno, pageno + PAGE_SIZE,
+ VM_PROT_ALL, 0)) != KERN_SUCCESS)
+ return EFAULT; /* could not make the page writable */
+ }
+
+ /*
+ * Now we need to get the page. out_entry, out_prot, wired, and
+ * single_use aren't used. One would think the vm code would be
+ * a *bit* nicer... We use tmap because vm_map_lookup() can
+ * change the map argument.
+ */
+
+ tmap = map;
+ rv = vm_map_lookup (&tmap, pageno, VM_PROT_WRITE, &out_entry,
+ &object, &off, &out_prot, &wired, &single_use);
+ if (rv != KERN_SUCCESS) {
+ return EINVAL;
+ }
+
+ /*
+  * We only needed the object and offset; release the map lock
+  * that vm_map_lookup() took on tmap.
+  */
+
+ vm_map_lookup_done (tmap, out_entry);
+
+ /*
+  * Fault the page in for write.  This forces any copy-on-write
+  * processing to happen now, before we copy the datum in below.
+  */
+
+ rv = vm_fault (map, pageno, VM_PROT_WRITE, FALSE);
+ if (rv != KERN_SUCCESS)
+ return EFAULT;
+
+ /* Find space in kernel_map for the page we're interested in */
+ rv = vm_map_find (kernel_map, object, off, &kva, PAGE_SIZE, 1);
+
+ if (!rv) {
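+ /*
+  * Take an extra reference on the object; the vm_map_remove()
+  * below drops one when it tears down the temporary mapping.
+  */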
+ vm_object_reference (object);
+
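+ /* Wire the page into the new mapping so the copy below cannot fault. */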
+ rv = vm_map_pageable (kernel_map, kva, kva + PAGE_SIZE, 0);
+ if (!rv) {
+ bcopy (&datum, (caddr_t)(kva + page_offset), sizeof datum);
+ }
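+ /* Tear down the temporary mapping; this drops the reference taken above. */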
+ vm_map_remove (kernel_map, kva, kva + PAGE_SIZE);
+ }
+
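+ /* If we had to make the page writable, put the protection back (see the XXX above). */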
+ if (fix_prot)
+ vm_map_protect (map, pageno, pageno + PAGE_SIZE,
+ VM_PROT_READ|VM_PROT_EXECUTE, 0);
+ return rv;
+}