add SEF's copyright notice
[unix-history] / usr / src / sys / miscfs / procfs / procfs_mem.c
CommitLineData
66375577
JSP
/*
 * Copyright (c) 1993 The Regents of the University of California.
 * Copyright (c) 1993 Jan-Simon Pendry
 * Copyright (c) 1993 Sean Eric Fagan
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Jan-Simon Pendry and Sean Eric Fagan.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)procfs_mem.c	8.3 (Berkeley) %G%
 *
 * From:
 *	$Id: procfs_mem.c,v 3.2 1993/12/15 09:40:17 jsp Exp $
 */
17
/*
 * This is a lightly hacked and merged version
 * of sef's pread/pwrite functions
 */
22
23#include <sys/param.h>
24#include <sys/systm.h>
25#include <sys/time.h>
26#include <sys/kernel.h>
27#include <sys/proc.h>
28#include <sys/vnode.h>
29#include <miscfs/procfs/procfs.h>
30#include <vm/vm.h>
31#include <vm/vm_kern.h>
32#include <vm/vm_page.h>
33
34static int
35procfs_rwmem(p, uio)
36 struct proc *p;
37 struct uio *uio;
38{
39 int error;
40 int writing;
41
42 writing = uio->uio_rw == UIO_WRITE;
43
44 /*
45 * Only map in one page at a time. We don't have to, but it
46 * makes things easier. This way is trivial - right?
47 */
48 do {
49 vm_map_t map, tmap;
50 vm_object_t object;
51 vm_offset_t kva;
52 vm_offset_t uva;
53 int page_offset; /* offset into page */
54 vm_offset_t pageno; /* page number */
55 vm_map_entry_t out_entry;
56 vm_prot_t out_prot;
57 vm_page_t m;
58 boolean_t wired, single_use;
59 vm_offset_t off;
60 u_int len;
61 int fix_prot;
62
63 uva = (vm_offset_t) uio->uio_offset;
64 if (uva > VM_MAXUSER_ADDRESS) {
65 error = 0;
66 break;
67 }
68
69 /*
70 * Get the page number of this segment.
71 */
72 pageno = trunc_page(uva);
73 page_offset = uva - pageno;
74
75 /*
76 * How many bytes to copy
77 */
78 len = min(PAGE_SIZE - page_offset, uio->uio_resid);
79
80 /*
81 * The map we want...
82 */
83 map = &p->p_vmspace->vm_map;
84
85 /*
86 * Check the permissions for the area we're interested
87 * in.
88 */
89 fix_prot = 0;
90 if (writing)
91 fix_prot = !vm_map_check_protection(map, pageno,
92 pageno + PAGE_SIZE, VM_PROT_WRITE);
93
94 if (fix_prot) {
95 /*
96 * If the page is not writable, we make it so.
97 * XXX It is possible that a page may *not* be
98 * read/executable, if a process changes that!
99 * We will assume, for now, that a page is either
100 * VM_PROT_ALL, or VM_PROT_READ|VM_PROT_EXECUTE.
101 */
102 error = vm_map_protect(map, pageno,
103 pageno + PAGE_SIZE, VM_PROT_ALL, 0);
104 if (error)
105 break;
106 }
107
108 /*
109 * Now we need to get the page. out_entry, out_prot, wired,
110 * and single_use aren't used. One would think the vm code
111 * would be a *bit* nicer... We use tmap because
112 * vm_map_lookup() can change the map argument.
113 */
114 tmap = map;
115 error = vm_map_lookup(&tmap, pageno,
116 writing ? VM_PROT_WRITE : VM_PROT_READ,
117 &out_entry, &object, &off, &out_prot,
118 &wired, &single_use);
119 /*
120 * We're done with tmap now.
121 */
122 if (!error)
123 vm_map_lookup_done(tmap, out_entry);
124
125 /*
126 * Fault the page in...
127 */
128 if (!error && writing && object->shadow) {
129 m = vm_page_lookup(object, off);
130 if (m == 0 || (m->flags & PG_COPYONWRITE))
131 error = vm_fault(map, pageno,
132 VM_PROT_WRITE, FALSE);
133 }
134
135 /* Find space in kernel_map for the page we're interested in */
136 if (!error)
137 error = vm_map_find(kernel_map, object, off, &kva,
138 PAGE_SIZE, 1);
139
140 if (!error) {
141 /*
142 * Neither vm_map_lookup() nor vm_map_find() appear
143 * to add a reference count to the object, so we do
144 * that here and now.
145 */
146 vm_object_reference(object);
147
148 /*
149 * Mark the page we just found as pageable.
150 */
151 error = vm_map_pageable(kernel_map, kva,
152 kva + PAGE_SIZE, 0);
153
154 /*
155 * Now do the i/o move.
156 */
157 if (!error)
158 error = uiomove(kva + page_offset, len, uio);
159
160 vm_map_remove(kernel_map, kva, kva + PAGE_SIZE);
161 }
162 if (fix_prot)
163 vm_map_protect(map, pageno, pageno + PAGE_SIZE,
164 VM_PROT_READ|VM_PROT_EXECUTE, 0);
165 } while (error == 0 && uio->uio_resid > 0);
166
167 return (error);
168}
169
170/*
171 * Copy data in and out of the target process.
172 * We do this by mapping the process's page into
173 * the kernel and then doing a uiomove direct
174 * from the kernel address space.
175 */
176int
177procfs_domem(curp, p, pfs, uio)
178 struct proc *curp;
179 struct proc *p;
180 struct pfsnode *pfs;
181 struct uio *uio;
182{
183 int error;
184
185 if (uio->uio_resid == 0)
186 return (0);
187
188 error = procfs_rwmem(p, uio);
189
190 return (error);
191}
192
193/*
194 * Given process (p), find the vnode from which
195 * it's text segment is being executed.
196 *
197 * It would be nice to grab this information from
198 * the VM system, however, there is no sure-fire
199 * way of doing that. Instead, fork(), exec() and
200 * wait() all maintain the p_textvp field in the
201 * process proc structure which contains a held
202 * reference to the exec'ed vnode.
203 */
204struct vnode *
205procfs_findtextvp(p)
206 struct proc *p;
207{
208 return (p->p_textvp);
209}
210
211
#ifdef probably_never
/*
 * Given process (p), find the vnode from which its text segment is
 * being mapped, by probing the user address space for a mapping
 * backed by a vnode pager.
 *
 * (This is here, rather than in procfs_subr, in order to keep all
 * the VM related code in one place.)
 *
 * Disabled: a page-by-page scan of the whole user address space is
 * slow and is not guaranteed to hit the text object first.
 */
struct vnode *
procfs_findtextvp(p)
	struct proc *p;
{
	vm_offset_t pageno;		/* user VA currently being probed */
	vm_object_t object;
	int error;

	/* find a vnode pager for the user address space */

	for (pageno = VM_MIN_ADDRESS;
	     pageno < VM_MAXUSER_ADDRESS;
	     pageno += PAGE_SIZE) {
		vm_map_t map;
		vm_map_entry_t out_entry;
		vm_prot_t out_prot;
		boolean_t wired, single_use;
		vm_offset_t off;

		map = &p->p_vmspace->vm_map;
		error = vm_map_lookup(&map, pageno,
				VM_PROT_READ,
				&out_entry, &object, &off, &out_prot,
				&wired, &single_use);

		if (!error) {
			vm_pager_t pager;

			printf("procfs: found vm object\n");
			vm_map_lookup_done(map, out_entry);
			printf("procfs: vm object = %x\n", object);

			/*
			 * At this point, assuming no errors, object
			 * is the VM object mapping UVA (pageno).
			 * Ensure it has a vnode pager, then grab
			 * the vnode from that pager's handle.
			 */
			pager = object->pager;
			printf("procfs: pager = %x\n", pager);
			if (pager)
				printf("procfs: found pager, type = %d\n", pager->pg_type);
			if (pager && pager->pg_type == PG_VNODE) {
				struct vnode *vp;

				vp = (struct vnode *) pager->pg_handle;
				printf("procfs: vp = 0x%x\n", vp);
				return (vp);
			}
		}
	}

	printf("procfs: text object not found\n");
	return (0);
}
#endif /* probably_never */