add new statistics
[unix-history] /usr/src/sys/kern/kern_malloc.c
/*
 * Copyright (c) 1987, 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)kern_malloc.c	7.35 (Berkeley) %G%
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/map.h>
#include <sys/kernel.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>

struct kmembuckets bucket[MINBUCKET + 16];
struct kmemstats kmemstats[M_LAST];
struct kmemusage *kmemusage;
char *kmembase, *kmemlimit;
char *memname[] = INITKMEMNAMES;

/*
 * The allocator is not reentrant; IN and OUT catch any attempt to
 * enter malloc or free while another allocation or free is in progress.
 */
long malloc_reentered;
#define IN	{ if (malloc_reentered) panic("malloc reentered");\
			else malloc_reentered = 1;}
#define OUT	(malloc_reentered = 0)

#ifdef DIAGNOSTIC
/*
 * This structure provides a set of masks to catch unaligned frees.
 */
long addrmask[] = { 0,
	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
};

/*
 * The WEIRD_ADDR is used as known text to copy into free objects so
 * that modifications after frees can be detected.
 */
#define WEIRD_ADDR	0xdeadbeef
#define MAX_COPY	32

/*
 * Normally the first word of the structure is used to hold the list
 * pointer for free objects. However, when running with diagnostics,
 * we use the third and fourth fields, so as to catch modifications
 * in the most commonly trashed first two words.
 */
struct freelist {
	long	spare0;
	short	type;
	long	spare1;
	caddr_t	next;
};
#else /* !DIAGNOSTIC */
struct freelist {
	caddr_t	next;
};
#endif /* DIAGNOSTIC */
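
/*
 * Illustrative sketch only, not part of the original file: how a freed
 * object is scrubbed so that later modification can be detected.  This
 * restates, in isolation, what the DIAGNOSTIC code in free() below
 * does; the helper name is hypothetical.
 */
#ifdef notdef
static void
example_scrub(addr, size, type)
	caddr_t addr;
	long size;
	int type;
{
	register struct freelist *freep = (struct freelist *)addr;
	register long *lp, *end;
	long copysize;

	copysize = size < MAX_COPY ? size : MAX_COPY;
	end = (long *)&addr[copysize];
	for (lp = (long *)addr; lp < end; lp++)
		*lp = WEIRD_ADDR;	/* known text, checked on reuse */
	freep->type = type;		/* remember the freeing type */
}
#endif /* notdef */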

/*
 * Each allocation in the 65-128 byte bucket is recorded on this list
 * so that the new statistics can attribute that memory to a type.
 */
struct uselist {
	struct uselist *next;
	caddr_t	mem;
	long	size;
	long	type;
} *listhd;

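/*
 * Illustrative sketch only, not part of the original file: a debugging
 * routine could walk the list above to report which types still hold
 * 65-128 byte allocations.  The function name is hypothetical.
 */
#ifdef notdef
void
dumpuselist()
{
	register struct uselist *mlp;

	for (mlp = listhd; mlp != NULL; mlp = mlp->next)
		printf("inuse: addr 0x%x size %d type %s\n",
		    mlp->mem, mlp->size, memname[mlp->type]);
}
#endif /* notdef */
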
/*
 * Allocate a block of memory
 */
void *
malloc(size, type, flags)
	unsigned long size;
	int type, flags;
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	register struct freelist *freep;
	long indx, npg, alloc, allocsize;
	int s;
	caddr_t va, cp, rp;
	struct uselist *mlp;
#ifdef DIAGNOSTIC
	long copysize;
#endif
#ifdef KMEMSTATS
	register struct kmemstats *ksp = &kmemstats[type];
#endif

#ifdef DIAGNOSTIC
	if (((unsigned long)type) > M_LAST)
		panic("malloc - bogus type");
	if (type == M_NAMEI)
		curproc->p_spare[0]++;
#endif
	indx = BUCKETINDX(size);
	kbp = &bucket[indx];
	s = splimp();
	IN;
#ifdef KMEMSTATS
	while (ksp->ks_memuse >= ksp->ks_limit) {
		if (flags & M_NOWAIT) {
			OUT;
			splx(s);
			return ((void *) NULL);
		}
		if (ksp->ks_limblocks < 65535)
			ksp->ks_limblocks++;
		OUT;
		tsleep((caddr_t)ksp, PSWP+2, memname[type], 0);
		IN;
	}
#endif
#ifdef DIAGNOSTIC
	copysize = 1 << indx < MAX_COPY ? 1 << indx : MAX_COPY;
#endif
	if (kbp->kb_next == NULL) {
		kbp->kb_last = NULL;
		if (size > MAXALLOCSAVE)
			allocsize = roundup(size, CLBYTES);
		else
			allocsize = 1 << indx;
		npg = clrnd(btoc(allocsize));
		va = (caddr_t) kmem_malloc(kmem_map, (vm_size_t)ctob(npg),
					   !(flags & M_NOWAIT));
		if (va == NULL) {
			OUT;
			splx(s);
			return ((void *) NULL);
		}
#ifdef KMEMSTATS
		kbp->kb_total += kbp->kb_elmpercl;
#endif
		kup = btokup(va);
		kup->ku_indx = indx;
		if (allocsize > MAXALLOCSAVE) {
			if (npg > 65535)
				panic("malloc: allocation too large");
			kup->ku_pagecnt = npg;
#ifdef KMEMSTATS
			ksp->ks_memuse += allocsize;
#endif
			goto out;
		}
#ifdef KMEMSTATS
		kup->ku_freecnt = kbp->kb_elmpercl;
		kbp->kb_totalfree += kbp->kb_elmpercl;
#endif
		/*
		 * Just in case we blocked while allocating memory,
		 * and someone else also allocated memory for this
		 * bucket, don't assume the list is still empty.
		 */
		rp = kbp->kb_next;	/* returned while blocked in vmemall */
		kbp->kb_next = va + (npg * NBPG) - allocsize;
		for (cp = kbp->kb_next; cp >= va; cp -= allocsize) {
			((caddr_t *)cp)[2] = (cp > va ? cp - allocsize : rp);
			if (indx == 7) {
				long *lp = (long *)cp;
				lp[0] = lp[1] = lp[3] = lp[4] = -1;
			}
		}
	}
	va = kbp->kb_next;
	kbp->kb_next = ((caddr_t *)va)[2];
	if (indx == 7) {
		long *lp = (long *)va;
		if (lp[0] != -1 || lp[1] != -1 || lp[3] != -1 || lp[4] != -1)
			panic("malloc meddled");
	}
#ifdef KMEMSTATS
	kup = btokup(va);
	if (kup->ku_indx != indx)
		panic("malloc: wrong bucket");
	if (kup->ku_freecnt == 0)
		panic("malloc: lost data");
	kup->ku_freecnt--;
	kbp->kb_totalfree--;
	ksp->ks_memuse += 1 << indx;
out:
	kbp->kb_calls++;
	ksp->ks_inuse++;
	ksp->ks_calls++;
	if (ksp->ks_memuse > ksp->ks_maxused)
		ksp->ks_maxused = ksp->ks_memuse;
#else
out:
#endif
	OUT;
	/*
	 * Record 65-128 byte allocations for the statistics list; this is
	 * done after OUT because malloc is called recursively to allocate
	 * the list entry.
	 */
	if (size > 64 && size <= 128) {
		mlp = (struct uselist *)malloc(sizeof(*mlp), M_TEMP, M_WAITOK);
		mlp->type = type;
		mlp->size = size;
		mlp->mem = va;
		mlp->next = listhd;
		listhd = mlp;
	}
	splx(s);
	return ((void *) va);
}
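
/*
 * Illustrative sketch only, not part of the original file: the calling
 * convention malloc() and free() expect.  M_TEMP, M_WAITOK and M_NOWAIT
 * are the values already used in this file; the surrounding function is
 * hypothetical.
 */
#ifdef notdef
int
example_caller()
{
	register caddr_t buf;

	/*
	 * A caller that can sleep passes M_WAITOK; malloc then tsleeps
	 * on the type's kmemstats rather than returning NULL when the
	 * type is over its limit.
	 */
	buf = (caddr_t)malloc(128, M_TEMP, M_WAITOK);
	free(buf, M_TEMP);

	/*
	 * A caller that cannot sleep (at interrupt level, for instance)
	 * passes M_NOWAIT and must be prepared for a NULL return.
	 */
	if ((buf = (caddr_t)malloc(64, M_TEMP, M_NOWAIT)) == NULL)
		return (ENOMEM);
	free(buf, M_TEMP);
	return (0);
}
#endif /* notdef */
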
/*
 * Free a block of memory allocated by malloc.
 */
void
free(addr, type)
	void *addr;
	int type;
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	register struct freelist *freep;
	long size;
	int s;
#ifdef DIAGNOSTIC
	caddr_t cp;
	long *end, *lp, alloc, copysize;
#endif
#ifdef KMEMSTATS
	register struct kmemstats *ksp = &kmemstats[type];
#endif

	kup = btokup(addr);
	size = 1 << kup->ku_indx;
	kbp = &bucket[kup->ku_indx];
	s = splimp();
	/*
	 * Remove 65-128 byte allocations from the statistics list.
	 */
	if (size == 128) {
		struct uselist *mlp, *pmlp;

		mlp = listhd;
		if (mlp == NULL)
			;		/* nothing recorded for this bucket */
		else if (mlp->mem == addr)
			listhd = mlp->next;
		else for (pmlp = mlp, mlp = mlp->next ; mlp; mlp = mlp->next) {
			if (mlp->mem == addr) {
				pmlp->next = mlp->next;
				break;
			}
			pmlp = mlp;
		}
		if (mlp == NULL)
			printf("free: lost type %s size %d\n", memname[type],
			    size);
		else
			free(mlp, M_TEMP);
	}
#ifdef DIAGNOSTIC
	/*
	 * Check for returns of data that do not point to the
	 * beginning of the allocation.
	 */
	if (type == M_NAMEI)
		curproc->p_spare[0]--;
	if (size > NBPG * CLSIZE)
		alloc = addrmask[BUCKETINDX(NBPG * CLSIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((u_long)addr & alloc) != 0)
		panic("free: unaligned addr 0x%x, size %d, type %s, mask %d\n",
		    addr, size, memname[type], alloc);
#endif /* DIAGNOSTIC */
	IN;
	if (size > MAXALLOCSAVE) {
		kmem_free(kmem_map, (vm_offset_t)addr, ctob(kup->ku_pagecnt));
#ifdef KMEMSTATS
		size = kup->ku_pagecnt << PGSHIFT;
		ksp->ks_memuse -= size;
		kup->ku_indx = 0;
		kup->ku_pagecnt = 0;
		if (ksp->ks_memuse + size >= ksp->ks_limit &&
		    ksp->ks_memuse < ksp->ks_limit)
			wakeup((caddr_t)ksp);
		ksp->ks_inuse--;
		kbp->kb_total -= 1;
#endif
		OUT;
		splx(s);
		return;
	}
	freep = (struct freelist *)addr;
#ifdef DIAGNOSTIC
	/*
	 * Check for multiple frees. Use a quick check to see if
	 * it looks free before laboriously searching the freelist.
	 */
	if (freep->spare0 == WEIRD_ADDR) {
		for (cp = kbp->kb_next; cp; cp = *(caddr_t *)cp) {
			if (addr != cp)
				continue;
			printf("multiply freed item 0x%x\n", addr);
			panic("free: duplicated free");
		}
	}
	/*
	 * Copy in known text to detect modification after freeing
	 * and to make it look free. Also, save the type being freed
	 * so we can list likely culprit if modification is detected
	 * when the object is reallocated.
	 */
	copysize = size < MAX_COPY ? size : MAX_COPY;
	end = (long *)&((caddr_t)addr)[copysize];
	for (lp = (long *)addr; lp < end; lp++)
		*lp = WEIRD_ADDR;
	freep->type = type;
#endif /* DIAGNOSTIC */
	if (size == 128) {
		long *lp = (long *)addr;
		lp[0] = lp[1] = lp[3] = lp[4] = -1;
	}
#ifdef KMEMSTATS
	kup->ku_freecnt++;
	if (kup->ku_freecnt >= kbp->kb_elmpercl)
		if (kup->ku_freecnt > kbp->kb_elmpercl)
			panic("free: multiple frees");
		else if (kbp->kb_totalfree > kbp->kb_highwat)
			kbp->kb_couldfree++;
	kbp->kb_totalfree++;
	ksp->ks_memuse -= size;
	if (ksp->ks_memuse + size >= ksp->ks_limit &&
	    ksp->ks_memuse < ksp->ks_limit)
		wakeup((caddr_t)ksp);
	ksp->ks_inuse--;
#endif
	if (kbp->kb_next == NULL)
		kbp->kb_next = addr;
	else
		((struct freelist *)kbp->kb_last)->next = addr;
	freep->next = NULL;
	kbp->kb_last = addr;
	OUT;
	splx(s);
}

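/*
 * Illustrative sketch only, not part of the original file: how the
 * DIAGNOSTIC addrmask table catches unaligned frees.  A small object
 * (2^indx bytes, no larger than a page) is carved from a page on a
 * 2^indx boundary, so any of the low indx bits being set in the
 * pointer means it is not the start of an allocation.  The function
 * name is hypothetical.
 */
#ifdef notdef
int
example_aligned(addr, indx)
	caddr_t addr;
	int indx;
{

	/* Nonzero only when addr lies on the bucket's natural boundary. */
	return (((u_long)addr & addrmask[indx]) == 0);
}
#endif /* notdef */
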
/*
 * Initialize the kernel memory allocator
 */
kmeminit()
{
	register long indx;
	int npg;

#if ((MAXALLOCSAVE & (MAXALLOCSAVE - 1)) != 0)
		ERROR!_kmeminit:_MAXALLOCSAVE_not_power_of_2
#endif
#if (MAXALLOCSAVE > MINALLOCSIZE * 32768)
		ERROR!_kmeminit:_MAXALLOCSAVE_too_big
#endif
#if (MAXALLOCSAVE < CLBYTES)
		ERROR!_kmeminit:_MAXALLOCSAVE_too_small
#endif
	npg = VM_KMEM_SIZE / NBPG;
	kmemusage = (struct kmemusage *) kmem_alloc(kernel_map,
		(vm_size_t)(npg * sizeof(struct kmemusage)));
	kmem_map = kmem_suballoc(kernel_map, (vm_offset_t *)&kmembase,
		(vm_offset_t *)&kmemlimit, (vm_size_t)(npg * NBPG), FALSE);
#ifdef KMEMSTATS
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		if (1 << indx >= CLBYTES)
			bucket[indx].kb_elmpercl = 1;
		else
			bucket[indx].kb_elmpercl = CLBYTES / (1 << indx);
		bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
	}
	for (indx = 0; indx < M_LAST; indx++)
		kmemstats[indx].ks_limit = npg * NBPG * 6 / 10;
#endif
}