/*
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) %G%
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/map.h>
#include <sys/kernel.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>

struct kmembuckets bucket[MINBUCKET + 16];
struct kmemstats kmemstats[M_LAST];
struct kmemusage *kmemusage;
char *kmembase, *kmemlimit;
char *memname[] = INITKMEMNAMES;
long malloc_reentered;
#define	IN	{ if (malloc_reentered) panic("malloc reentered"); \
		  else malloc_reentered = 1; }
#define	OUT	(malloc_reentered = 0)
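/*
 * IN/OUT bracket the allocator's critical sections: if malloc or free is
 * entered again before the previous call has finished (e.g. from an
 * interrupt that the splimp protection should have excluded), the kernel
 * panics rather than silently corrupting the free lists.
 */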

#ifdef DIAGNOSTIC
/*
 * This structure provides a set of masks to catch unaligned frees.
 */
long addrmask[] = { 0,
	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
};
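/*
 * addrmask[i] == (1 << i) - 1, i.e. the low-order i bits are set, so
 * masking an address with the entry for its bucket index exposes any
 * misalignment within a power-of-two sized chunk.
 */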

/*
 * The WEIRD_ADDR is used as known text to copy into free objects so
 * that modifications after frees can be detected.
 */
#define WEIRD_ADDR	0xdeadbeef
#define MAX_COPY	32
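/*
 * Only the first MAX_COPY bytes of a freed object are filled with the
 * WEIRD_ADDR pattern; larger objects are only partially scribbled so
 * that freeing them stays cheap.
 */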

/*
 * Normally the first word of the structure is used to hold the list
 * pointer for free objects. However, when running with diagnostics,
 * we use the third and fourth fields, so as to catch modifications
 * in the most commonly trashed first two words.
 */
struct freelist {
	long	spare0;
	short	type;
	long	spare1;
	caddr_t	next;
};
#else /* !DIAGNOSTIC */
struct freelist {
	caddr_t	next;
};
#endif /* DIAGNOSTIC */

struct uselist {
	struct uselist *next;
	caddr_t	mem;
	long	size;
	long	type;
} *listhd;
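/*
 * Debugging aid: malloc links every 65-128 byte allocation onto this
 * list and free removes it again, so leaked or mislaid 128-byte chunks
 * can be spotted (see the size checks in malloc and free below).
 */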

/*
 * Allocate a block of memory
 */
void *
malloc(size, type, flags)
	unsigned long size;
	int type, flags;
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	register struct freelist *freep;
	long indx, npg, allocsize;
	int s;
	caddr_t va, cp, rp;
	struct uselist *mlp;
#ifdef DIAGNOSTIC
	long copysize;
#endif
#ifdef KMEMSTATS
	register struct kmemstats *ksp = &kmemstats[type];
#endif

#ifdef DIAGNOSTIC
	if (((unsigned long)type) > M_LAST)
		panic("malloc - bogus type");
	if (type == M_NAMEI)
		curproc->p_spare[0]++;
#endif
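	/*
	 * BUCKETINDX(size) selects the smallest power-of-two bucket that
	 * holds the request, e.g. a 100-byte request maps to the 128-byte
	 * bucket.
	 */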
	indx = BUCKETINDX(size);
	kbp = &bucket[indx];
	s = splimp();
	IN;
#ifdef KMEMSTATS
	while (ksp->ks_memuse >= ksp->ks_limit) {
		if (flags & M_NOWAIT) {
			OUT;
			splx(s);
			return ((void *) NULL);
		}
		if (ksp->ks_limblocks < 65535)
			ksp->ks_limblocks++;
		OUT;
		tsleep((caddr_t)ksp, PSWP+2, memname[type], 0);
		IN;
	}
	ksp->ks_size |= 1 << indx;
#endif
#ifdef DIAGNOSTIC
	copysize = 1 << indx < MAX_COPY ? 1 << indx : MAX_COPY;
#endif
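	/*
	 * If the bucket's free list is empty, grab more pages from the
	 * kmem_map and carve them into allocsize chunks.
	 */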
	if (kbp->kb_next == NULL) {
		kbp->kb_last = NULL;
		if (size > MAXALLOCSAVE)
			allocsize = roundup(size, CLBYTES);
		else
			allocsize = 1 << indx;
		npg = clrnd(btoc(allocsize));
		va = (caddr_t) kmem_malloc(kmem_map, (vm_size_t)ctob(npg),
					   !(flags & M_NOWAIT));
		if (va == NULL) {
			OUT;
			splx(s);
			return ((void *) NULL);
		}
#ifdef KMEMSTATS
		kbp->kb_total += kbp->kb_elmpercl;
#endif
		kup = btokup(va);
		kup->ku_indx = indx;
		if (allocsize > MAXALLOCSAVE) {
			if (npg > 65535)
				panic("malloc: allocation too large");
			kup->ku_pagecnt = npg;
#ifdef KMEMSTATS
			ksp->ks_memuse += allocsize;
#endif
			goto out;
		}
#ifdef KMEMSTATS
		kup->ku_freecnt = kbp->kb_elmpercl;
		kbp->kb_totalfree += kbp->kb_elmpercl;
#endif
		/*
		 * Just in case we blocked while allocating memory,
		 * and someone else also allocated memory for this
		 * bucket, don't assume the list is still empty.
		 */
		rp = kbp->kb_next;	/* returned while blocked in vmemall */
		kbp->kb_next = va + (npg * NBPG) - allocsize;
		for (cp = kbp->kb_next; cp >= va; cp -= allocsize) {
			((caddr_t *)cp)[2] = (cp > va ? cp - allocsize : rp);
			if (indx == 7) {
				long *lp = (long *)cp;
				lp[0] = lp[1] = lp[3] = lp[4] = -1;
			}
		}
	}
	va = kbp->kb_next;
	kbp->kb_next = ((caddr_t *)va)[2];
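	/*
	 * The 128-byte bucket (index 7, since 1 << 7 == 128) gets extra
	 * checking: free chunks carry -1 sentinels in words 0, 1, 3 and 4,
	 * so a sentinel that has changed by the time the chunk is handed
	 * out again indicates a write through a stale pointer.
	 */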
	if (indx == 7) {
		long *lp = (long *)va;
		if (lp[0] != -1 || lp[1] != -1 || lp[3] != -1 || lp[4] != -1)
			panic("malloc meddled");
	}
#ifdef KMEMSTATS
	kup = btokup(va);
	if (kup->ku_indx != indx)
		panic("malloc: wrong bucket");
	if (kup->ku_freecnt == 0)
		panic("malloc: lost data");
	kup->ku_freecnt--;
	kbp->kb_totalfree--;
	ksp->ks_memuse += 1 << indx;
out:
	kbp->kb_calls++;
	ksp->ks_inuse++;
	ksp->ks_calls++;
	if (ksp->ks_memuse > ksp->ks_maxused)
		ksp->ks_maxused = ksp->ks_memuse;
#else
out:
#endif
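	/*
	 * Record 65-128 byte allocations on the uselist for leak tracking.
	 * The reentrancy flag is dropped first because the bookkeeping
	 * entry is itself obtained with a nested call to malloc.
	 */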
	OUT;
	if (size > 64 && size <= 128) {
		mlp = (struct uselist *)malloc(sizeof(*mlp), M_TEMP, M_WAITOK);
		mlp->type = type;
		mlp->size = size;
		mlp->mem = va;
		mlp->next = listhd;
		listhd = mlp;
	}
	splx(s);
	return ((void *) va);
}
/*
 * Free a block of memory allocated by malloc.
 */
void
free(addr, type)
	void *addr;
	int type;
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	register struct freelist *freep;
	long size;
	int s;
#ifdef DIAGNOSTIC
	caddr_t cp;
	long *end, *lp, alloc, copysize;
#endif
#ifdef KMEMSTATS
	register struct kmemstats *ksp = &kmemstats[type];
#endif

	kup = btokup(addr);
	size = 1 << kup->ku_indx;
#ifdef DIAGNOSTIC
	/*
	 * Check for returns of data that do not point to the
	 * beginning of the allocation.
	 */
	if (size > NBPG * CLSIZE)
		alloc = addrmask[BUCKETINDX(NBPG * CLSIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((u_long)addr & alloc) != 0) {
		printf("free: unaligned addr 0x%x, size %d, type %d, mask %d\n",
		    addr, size, type, alloc);
		panic("free: unaligned addr");
	}
#endif /* DIAGNOSTIC */
	kbp = &bucket[kup->ku_indx];
	s = splimp();
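	/*
	 * For 128-byte chunks, drop the matching uselist entry made by
	 * malloc; an entry that cannot be found is reported as lost.
	 */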
	if (size == 128) {
		struct uselist *mlp, *pmlp;

		mlp = listhd;
		if (mlp->mem == addr)
			listhd = mlp->next;
		else for (pmlp = mlp, mlp = mlp->next; mlp; mlp = mlp->next) {
			if (mlp->mem == addr) {
				pmlp->next = mlp->next;
				break;
			}
			pmlp = mlp;
		}
		if (mlp == NULL)
			printf("free: lost type %s size %d\n", memname[type],
			    size);
		else
			free(mlp, M_TEMP);
	}
#ifdef DIAGNOSTIC
	if (type == M_NAMEI)
		curproc->p_spare[0]--;
#endif /* DIAGNOSTIC */
	IN;
	if (size > MAXALLOCSAVE) {
		kmem_free(kmem_map, (vm_offset_t)addr, ctob(kup->ku_pagecnt));
#ifdef KMEMSTATS
		size = kup->ku_pagecnt << PGSHIFT;
		ksp->ks_memuse -= size;
		kup->ku_indx = 0;
		kup->ku_pagecnt = 0;
		if (ksp->ks_memuse + size >= ksp->ks_limit &&
		    ksp->ks_memuse < ksp->ks_limit)
			wakeup((caddr_t)ksp);
		ksp->ks_inuse--;
		kbp->kb_total -= 1;
#endif
		OUT;
		splx(s);
		return;
	}
	freep = (struct freelist *)addr;
#ifdef DIAGNOSTIC
	/*
	 * Check for multiple frees. Use a quick check to see if
	 * it looks free before laboriously searching the freelist.
	 */
	if (freep->spare0 == WEIRD_ADDR) {
		for (cp = kbp->kb_next; cp; cp = *(caddr_t *)cp) {
			if (addr != cp)
				continue;
			printf("multiply freed item 0x%x\n", addr);
			panic("free: duplicated free");
		}
	}
	/*
	 * Copy in known text to detect modification after freeing
	 * and to make it look free. Also, save the type being freed
	 * so we can list likely culprit if modification is detected
	 * when the object is reallocated.
	 */
	copysize = size < MAX_COPY ? size : MAX_COPY;
	end = (long *)&((caddr_t)addr)[copysize];
	for (lp = (long *)addr; lp < end; lp++)
		*lp = WEIRD_ADDR;
	freep->type = type;
#endif /* DIAGNOSTIC */
	if (size == 128) {
		long *lp = (long *)addr;
		lp[0] = lp[1] = lp[3] = lp[4] = -1;
	}
#ifdef KMEMSTATS
	kup->ku_freecnt++;
	if (kup->ku_freecnt >= kbp->kb_elmpercl)
		if (kup->ku_freecnt > kbp->kb_elmpercl)
			panic("free: multiple frees");
		else if (kbp->kb_totalfree > kbp->kb_highwat)
			kbp->kb_couldfree++;
	kbp->kb_totalfree++;
	ksp->ks_memuse -= size;
	if (ksp->ks_memuse + size >= ksp->ks_limit &&
	    ksp->ks_memuse < ksp->ks_limit)
		wakeup((caddr_t)ksp);
	ksp->ks_inuse--;
#endif
	if (kbp->kb_next == NULL)
		kbp->kb_next = addr;
	else
		((struct freelist *)kbp->kb_last)->next = addr;
	freep->next = NULL;
	kbp->kb_last = addr;
	OUT;
	splx(s);
}

/*
 * Initialize the kernel memory allocator
 */
kmeminit()
{
	register long indx;
	int npg;

#if ((MAXALLOCSAVE & (MAXALLOCSAVE - 1)) != 0)
	ERROR!_kmeminit:_MAXALLOCSAVE_not_power_of_2
#endif
#if (MAXALLOCSAVE > MINALLOCSIZE * 32768)
	ERROR!_kmeminit:_MAXALLOCSAVE_too_big
#endif
#if (MAXALLOCSAVE < CLBYTES)
	ERROR!_kmeminit:_MAXALLOCSAVE_too_small
#endif
	npg = VM_KMEM_SIZE / NBPG;
	kmemusage = (struct kmemusage *) kmem_alloc(kernel_map,
		(vm_size_t)(npg * sizeof(struct kmemusage)));
	kmem_map = kmem_suballoc(kernel_map, (vm_offset_t *)&kmembase,
		(vm_offset_t *)&kmemlimit, (vm_size_t)(npg * NBPG), FALSE);
#ifdef KMEMSTATS
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		if (1 << indx >= CLBYTES)
			bucket[indx].kb_elmpercl = 1;
		else
			bucket[indx].kb_elmpercl = CLBYTES / (1 << indx);
		bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
	}
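	/*
	 * Limit each malloc type to 60% of the kmem arena; once a type
	 * reaches its limit, further requests sleep in malloc (or fail
	 * with M_NOWAIT) until some of that type is freed.
	 */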
	for (indx = 0; indx < M_LAST; indx++)
		kmemstats[indx].ks_limit = npg * NBPG * 6 / 10;
#endif
}