BSD 4_4_Lite2 release
[unix-history] / usr / src / sys / vm / vm_glue.c
/*
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)vm_glue.c   8.9 (Berkeley) 3/4/95
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/buf.h>
#include <sys/user.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#include <machine/cpu.h>

int     avefree = 0;            /* XXX */
unsigned maxdmap = MAXDSIZ;     /* XXX */
int     readbuffers = 0;        /* XXX allow kgdb to read kernel buffer pool */

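/*
 * kernacc: check whether the kernel map allows access of the given
 * type (B_READ or B_WRITE) to the byte range [addr, addr+len).
 */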
int
kernacc(addr, len, rw)
        caddr_t addr;
        int len, rw;
{
        boolean_t rv;
        vm_offset_t saddr, eaddr;
        vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

        saddr = trunc_page(addr);
        eaddr = round_page(addr+len);
        rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
        /*
         * XXX there are still some things (e.g. the buffer cache) that
         * are managed behind the VM system's back so even though an
         * address is accessible in the mind of the VM system, there may
         * not be physical pages where the VM thinks there is.  This can
         * lead to bogus allocation of pages in the kernel address space
         * or worse, inconsistencies at the pmap level.  We only worry
         * about the buffer cache for now.
         */
        if (!readbuffers && rv && (eaddr > (vm_offset_t)buffers &&
                   saddr < (vm_offset_t)buffers + MAXBSIZE * nbuf))
                rv = FALSE;
        return(rv == TRUE);
}

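/*
 * useracc: as kernacc, but check the current process's address space
 * rather than the kernel map.
 */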
int
useracc(addr, len, rw)
        caddr_t addr;
        int len, rw;
{
        boolean_t rv;
        vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

        rv = vm_map_check_protection(&curproc->p_vmspace->vm_map,
            trunc_page(addr), round_page(addr+len), prot);
        return(rv == TRUE);
}

#ifdef KGDB
/*
 * Change protections on kernel pages from addr to addr+len
 * (presumably so debugger can plant a breakpoint).
 *
 * We force the protection change at the pmap level.  If we were
 * to use vm_map_protect a change to allow writing would be lazily-
 * applied meaning we would still take a protection fault, something
 * we really don't want to do.  It would also fragment the kernel
 * map unnecessarily.  We cannot use pmap_protect since it also won't
 * enforce a write-enable request.  Using pmap_enter is the only way
 * we can ensure the change takes place properly.
 */
void
chgkprot(addr, len, rw)
        register caddr_t addr;
        int len, rw;
{
        vm_prot_t prot;
        vm_offset_t pa, sva, eva;

        prot = rw == B_READ ? VM_PROT_READ : VM_PROT_READ|VM_PROT_WRITE;
        eva = round_page(addr + len);
        for (sva = trunc_page(addr); sva < eva; sva += PAGE_SIZE) {
                /*
                 * Extract physical address for the page.
                 * We use a cheezy hack to differentiate physical
                 * page 0 from an invalid mapping, not that it
                 * really matters...
                 */
                pa = pmap_extract(kernel_pmap, sva|1);
                if (pa == 0)
                        panic("chgkprot: invalid page");
                pmap_enter(kernel_pmap, sva, pa&~1, prot, TRUE);
        }
}
#endif

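/*
 * vslock: wire down the user pages spanning [addr, addr+len) in the
 * current process so they stay resident (e.g. during raw device I/O).
 */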
void
vslock(addr, len)
        caddr_t addr;
        u_int   len;
{
        vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page(addr),
                        round_page(addr+len), FALSE);
}

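/*
 * vsunlock: undo vslock, unwiring the pages again; `dirtied' is
 * currently unused (see the lint workaround below).
 */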
void
vsunlock(addr, len, dirtied)
        caddr_t addr;
        u_int   len;
        int dirtied;
{
#ifdef  lint
        dirtied++;
#endif
        vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page(addr),
                        round_page(addr+len), TRUE);
}

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.
 * NOTE: the kernel stack may be at a different location in the child
 * process, and thus addresses of automatic variables may be invalid
 * after cpu_fork returns in the child process.  We do nothing here
 * after cpu_fork returns.
 */
int
vm_fork(p1, p2, isvfork)
        register struct proc *p1, *p2;
        int isvfork;
{
        register struct user *up;
        vm_offset_t addr;

#ifdef i386
        /*
         * avoid copying any of the parent's pagetables or other per-process
         * objects that reside in the map by marking all of them non-inheritable
         */
        (void)vm_map_inherit(&p1->p_vmspace->vm_map,
                UPT_MIN_ADDRESS-UPAGES*NBPG, VM_MAX_ADDRESS, VM_INHERIT_NONE);
#endif
        p2->p_vmspace = vmspace_fork(p1->p_vmspace);

#ifdef SYSVSHM
        if (p1->p_vmspace->vm_shm)
                shmfork(p1, p2, isvfork);
#endif

#ifndef i386
        /*
         * Allocate a wired-down (for now) pcb and kernel stack for the process
         */
        addr = kmem_alloc_pageable(kernel_map, ctob(UPAGES));
        if (addr == 0)
                panic("vm_fork: no more kernel virtual memory");
        vm_map_pageable(kernel_map, addr, addr + ctob(UPAGES), FALSE);
#else
        /*
         * XXX somehow, on 386, occasionally pageout removes active, wired
         * down kstack and pagetables, WITHOUT going thru vm_page_unwire!
         * Why this appears to work is not yet clear, yet it does...
         */
        addr = kmem_alloc(kernel_map, ctob(UPAGES));
        if (addr == 0)
                panic("vm_fork: no more kernel virtual memory");
#endif
        up = (struct user *)addr;
        p2->p_addr = up;

        /*
         * p_stats and p_sigacts currently point at fields
         * in the user struct but not at &u, instead at p_addr.
         * Copy p_sigacts and parts of p_stats; zero the rest
         * of p_stats (statistics).
         */
        p2->p_stats = &up->u_stats;
        p2->p_sigacts = &up->u_sigacts;
        up->u_sigacts = *p1->p_sigacts;
        bzero(&up->u_stats.pstat_startzero,
            (unsigned) ((caddr_t)&up->u_stats.pstat_endzero -
            (caddr_t)&up->u_stats.pstat_startzero));
        bcopy(&p1->p_stats->pstat_startcopy, &up->u_stats.pstat_startcopy,
            ((caddr_t)&up->u_stats.pstat_endcopy -
             (caddr_t)&up->u_stats.pstat_startcopy));

#ifdef i386
        { u_int addr = UPT_MIN_ADDRESS - UPAGES*NBPG; struct vm_map *vp;

        vp = &p2->p_vmspace->vm_map;
        (void)vm_deallocate(vp, addr, UPT_MAX_ADDRESS - addr);
        (void)vm_allocate(vp, &addr, UPT_MAX_ADDRESS - addr, FALSE);
        (void)vm_map_inherit(vp, addr, UPT_MAX_ADDRESS, VM_INHERIT_NONE);
        }
#endif
        /*
         * cpu_fork will copy and update the kernel stack and pcb,
         * and make the child ready to run.  It marks the child
         * so that it can return differently than the parent.
         * It returns twice, once in the parent process and
         * once in the child.
         */
        return (cpu_fork(p1, p2));
}

/*
 * Set default limits for VM system.
 * Called for proc 0, and then inherited by all others.
 */
void
vm_init_limits(p)
        register struct proc *p;
{

        /*
         * Set up the initial limits on process VM.
         * Set the maximum resident set size to be all
         * of (reasonably) available memory.  This causes
         * any single, large process to start random page
         * replacement once it fills memory.
         */
        p->p_rlimit[RLIMIT_STACK].rlim_cur = DFLSSIZ;
        p->p_rlimit[RLIMIT_STACK].rlim_max = MAXSSIZ;
        p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
        p->p_rlimit[RLIMIT_DATA].rlim_max = MAXDSIZ;
        p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(cnt.v_free_count);
}

#include <vm/vm_pageout.h>

#ifdef DEBUG
int     enableswap = 1;
int     swapdebug = 0;
#define SDB_FOLLOW      1
#define SDB_SWAPIN      2
#define SDB_SWAPOUT     4
#endif

/*
 * Brutally simple:
 *      1. Attempt to swap in every swapped-out, runnable process in
 *         order of priority.
 *      2. If not enough memory, wake the pageout daemon and let it
 *         clear some space.
 */
void
scheduler()
{
        register struct proc *p;
        register int pri;
        struct proc *pp;
        int ppri;
        vm_offset_t addr;
        vm_size_t size;

loop:
#ifdef DEBUG
        while (!enableswap)
                tsleep((caddr_t)&proc0, PVM, "noswap", 0);
#endif
        pp = NULL;
        ppri = INT_MIN;
        for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
                if (p->p_stat == SRUN && (p->p_flag & P_INMEM) == 0) {
                        /* XXX should also penalize based on vm_swrss */
                        pri = p->p_swtime + p->p_slptime - p->p_nice * 8;
                        if (pri > ppri) {
                                pp = p;
                                ppri = pri;
                        }
                }
        }
#ifdef DEBUG
        if (swapdebug & SDB_FOLLOW)
                printf("scheduler: running, procp %x pri %d\n", pp, ppri);
#endif
        /*
         * Nothing to do, back to sleep
         */
        if ((p = pp) == NULL) {
                tsleep((caddr_t)&proc0, PVM, "scheduler", 0);
                goto loop;
        }

        /*
         * We would like to bring someone in.
         * This part is really bogus cuz we could deadlock on memory
         * despite our feeble check.
         * XXX should require at least vm_swrss / 2
         */
        size = round_page(ctob(UPAGES));
        addr = (vm_offset_t) p->p_addr;
        if (cnt.v_free_count > atop(size)) {
#ifdef DEBUG
                if (swapdebug & SDB_SWAPIN)
                        printf("swapin: pid %d(%s)@%x, pri %d free %d\n",
                               p->p_pid, p->p_comm, p->p_addr,
                               ppri, cnt.v_free_count);
#endif
                vm_map_pageable(kernel_map, addr, addr+size, FALSE);
                /*
                 * Some architectures need to be notified when the
                 * user area has moved to new physical page(s) (e.g.
                 * see pmax/pmax/vm_machdep.c).
                 */
                cpu_swapin(p);
                (void) splstatclock();
                if (p->p_stat == SRUN)
                        setrunqueue(p);
                p->p_flag |= P_INMEM;
                (void) spl0();
                p->p_swtime = 0;
                goto loop;
        }
        /*
         * Not enough memory, jab the pageout daemon and wait til the
         * coast is clear.
         */
#ifdef DEBUG
        if (swapdebug & SDB_FOLLOW)
                printf("scheduler: no room for pid %d(%s), free %d\n",
                       p->p_pid, p->p_comm, cnt.v_free_count);
#endif
        (void) splhigh();
        VM_WAIT;
        (void) spl0();
#ifdef DEBUG
        if (swapdebug & SDB_FOLLOW)
                printf("scheduler: room again, free %d\n", cnt.v_free_count);
#endif
        goto loop;
}

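/*
 * A process is a candidate for swapping only if it is resident (P_INMEM)
 * and none of P_SYSTEM, P_NOSWAP, P_WEXIT or P_PHYSIO is set.
 */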
#define swappable(p)                                                    \
        (((p)->p_flag &                                                 \
            (P_SYSTEM | P_INMEM | P_NOSWAP | P_WEXIT | P_PHYSIO)) == P_INMEM)

/*
 * Swapout is driven by the pageout daemon.  Very simple, we find eligible
 * procs and unwire their u-areas.  We try to always "swap" at least one
 * process in case we need the room for a swapin.
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Else, we swap the longest-sleeping or stopped process,
 * if any, otherwise the longest-resident process.
 */
void
swapout_threads()
{
        register struct proc *p;
        struct proc *outp, *outp2;
        int outpri, outpri2;
        int didswap = 0;
        extern int maxslp;

#ifdef DEBUG
        if (!enableswap)
                return;
#endif
        outp = outp2 = NULL;
        outpri = outpri2 = 0;
        for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
                if (!swappable(p))
                        continue;
                switch (p->p_stat) {
                case SRUN:
                        if (p->p_swtime > outpri2) {
                                outp2 = p;
                                outpri2 = p->p_swtime;
                        }
                        continue;

                case SSLEEP:
                case SSTOP:
                        if (p->p_slptime >= maxslp) {
                                swapout(p);
                                didswap++;
                        } else if (p->p_slptime > outpri) {
                                outp = p;
                                outpri = p->p_slptime;
                        }
                        continue;
                }
        }
        /*
         * If we didn't get rid of any real duds, toss out the next most
         * likely sleeping/stopped or running candidate.  We only do this
         * if we are real low on memory since we don't gain much by doing
         * it (UPAGES pages).
         */
        if (didswap == 0 &&
            cnt.v_free_count <= atop(round_page(ctob(UPAGES)))) {
                if ((p = outp) == 0)
                        p = outp2;
#ifdef DEBUG
                if (swapdebug & SDB_SWAPOUT)
                        printf("swapout_threads: no duds, try procp %x\n", p);
#endif
                if (p)
                        swapout(p);
        }
}

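/*
 * swapout: unwire a process's u-area (pcb and kernel stack), mark the
 * process as no longer in memory, and pull it off the run queue if needed.
 */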
void
swapout(p)
        register struct proc *p;
{
        vm_offset_t addr;
        vm_size_t size;

#ifdef DEBUG
        if (swapdebug & SDB_SWAPOUT)
                printf("swapout: pid %d(%s)@%x, stat %x pri %d free %d\n",
                       p->p_pid, p->p_comm, p->p_addr, p->p_stat,
                       p->p_slptime, cnt.v_free_count);
#endif
        size = round_page(ctob(UPAGES));
        addr = (vm_offset_t) p->p_addr;
#if defined(hp300) || defined(luna68k)
        /*
         * Ugh!  u-area is double mapped to a fixed address behind the
         * back of the VM system and accesses are usually through that
         * address rather than the per-process address.  Hence reference
         * and modify information are recorded at the fixed address and
         * lost at context switch time.  We assume the u-struct and
         * kernel stack are always accessed/modified and force it to be so.
         */
        {
                register int i;
                volatile long tmp;

                for (i = 0; i < UPAGES; i++) {
                        tmp = *(long *)addr; *(long *)addr = tmp;
                        addr += NBPG;
                }
                addr = (vm_offset_t) p->p_addr;
        }
#endif
#ifdef mips
        /*
         * Be sure to save the floating point coprocessor state before
         * paging out the u-struct.
         */
        {
                extern struct proc *machFPCurProcPtr;

                if (p == machFPCurProcPtr) {
                        MachSaveCurFPState(p);
                        machFPCurProcPtr = (struct proc *)0;
                }
        }
#endif
#ifndef i386    /* temporary measure till we find spontaneous unwire of kstack */
        vm_map_pageable(kernel_map, addr, addr+size, TRUE);
        pmap_collect(vm_map_pmap(&p->p_vmspace->vm_map));
#endif
        (void) splhigh();
        p->p_flag &= ~P_INMEM;
        if (p->p_stat == SRUN)
                remrq(p);
        (void) spl0();
        p->p_swtime = 0;
}

/*
 * The rest of these routines fake thread handling
 */

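/*
 * assert_wait() records the event the current process intends to wait on;
 * thread_block() and thread_sleep() then sleep on it via tsleep(), and
 * thread_wakeup() issues the corresponding wakeup().
 */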
void
assert_wait(event, ruptible)
        void *event;
        boolean_t ruptible;
{
#ifdef lint
        ruptible++;
#endif
        curproc->p_thread = event;
}

void
thread_block()
{
        int s = splhigh();

        if (curproc->p_thread)
                tsleep(curproc->p_thread, PVM, "thrd_block", 0);
        splx(s);
}

void
thread_sleep(event, lock, ruptible)
        void *event;
        simple_lock_t lock;
        boolean_t ruptible;
{
        int s = splhigh();

#ifdef lint
        ruptible++;
#endif
        curproc->p_thread = event;
        simple_unlock(lock);
        if (curproc->p_thread)
                tsleep(event, PVM, "thrd_sleep", 0);
        splx(s);
}

void
thread_wakeup(event)
        void *event;
{
        int s = splhigh();

        wakeup(event);
        splx(s);
}

/*
 * DEBUG stuff
 */

int indent = 0;

#include <machine/stdarg.h>             /* see subr_prf.c */

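/*
 * iprintf: a printf that indents its output by `indent' positions,
 * for use by the DEBUG printing code.
 */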
/*ARGSUSED2*/
void
#if __STDC__
iprintf(const char *fmt, ...)
#else
iprintf(fmt /* , va_alist */)
        char *fmt;
        /* va_dcl */
#endif
{
        register int i;
        va_list ap;

        for (i = indent; i >= 8; i -= 8)
                printf("\t");
        while (--i >= 0)
                printf(" ");
        va_start(ap, fmt);
        printf("%r", fmt, ap);
        va_end(ap);
}