add kb_last to reference last element in free chain
[unix-history] / usr / src / sys / kern / kern_malloc.c
CommitLineData
d4202556 1/*
60261b22 2 * Copyright (c) 1987, 1991 The Regents of the University of California.
2420c94a 3 * All rights reserved.
d4202556 4 *
dbf0c423 5 * %sccs.include.redist.c%
2420c94a 6 *
de3e217b 7 * @(#)kern_malloc.c 7.33.1.1 (Berkeley) %G%
d4202556
KM
8 */
9
10#include "param.h"
d4202556
KM
11#include "proc.h"
12#include "map.h"
13#include "kernel.h"
14#include "malloc.h"
cf591ff3
MK
15#include "vm/vm.h"
16#include "vm/vm_kern.h"
d4202556
KM
17
18struct kmembuckets bucket[MINBUCKET + 16];
19struct kmemstats kmemstats[M_LAST];
20struct kmemusage *kmemusage;
ebde2224 21char *memname[] = INITKMEMNAMES;
9d4095a1 22char *kmembase, *kmemlimit;
79ce1cef 23char *memname[] = INITKMEMNAMES;
c029fe44
KS
24long malloc_reentered;
25#define IN { if (malloc_reentered) panic("malloc reentered");\
26 else malloc_reentered = 1;}
27#define OUT (malloc_reentered = 0)
d4202556 28
a211e37d
KM
29#ifdef DIAGNOSTIC
30/*
f53f530d 31 * This structure provides a set of masks to catch unaligned frees.
a211e37d 32 */
f53f530d 33long addrmask[] = { 0,
a211e37d
KM
34 0x00000001, 0x00000003, 0x00000007, 0x0000000f,
35 0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
36 0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
37 0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
38};
05138c22 39
f53f530d
KM
40/*
41 * The WEIRD_ADDR is used as known text to copy into free objects so
42 * that modifications after frees can be detected.
43 */
44#define WEIRD_ADDR 0xdeadbeef
45#define MAX_COPY 32
46
05138c22
KM
47/*
48 * Normally the first word of the structure is used to hold the list
49 * pointer for free objects. However, when running with diagnostics,
50 * we use the third and fourth fields, so as to catch modifications
51 * in the most commonly trashed first two words.
52 */
53struct freelist {
54 long spare0;
05138c22 55 short type;
31527656 56 long spare1;
05138c22
KM
57 caddr_t next;
58};
59#else /* !DIAGNOSTIC */
60struct freelist {
61 caddr_t next;
62};
a211e37d
KM
63#endif /* DIAGNOSTIC */
64
de3e217b
KM
65struct uselist {
66 struct uselist *next;
67 caddr_t mem;
68 long size;
69 long type;
70} *listhd;
71
d4202556
KM
72/*
73 * Allocate a block of memory
74 */
44dd1cfa 75void *
738ba0d6 76malloc(size, type, flags)
d4202556 77 unsigned long size;
47516941 78 int type, flags;
d4202556
KM
79{
80 register struct kmembuckets *kbp;
81 register struct kmemusage *kup;
05138c22 82 register struct freelist *freep;
47516941
MK
83 long indx, npg, alloc, allocsize;
84 int s;
ebde2224 85 caddr_t va, cp, rp;
d4202556 86#ifdef KMEMSTATS
fd78e9f6 87 register struct kmemstats *ksp = &kmemstats[type];
dbe43ea1 88
b8c8091e 89#ifdef DIAGNOSTIC
dbe43ea1 90 if (((unsigned long)type) > M_LAST)
a2aebb63 91 panic("malloc - bogus type");
b8c8091e
KM
92 if (type == M_NAMEI)
93 curproc->p_spare[0]++;
d4202556
KM
94 indx = BUCKETINDX(size);
95 kbp = &bucket[indx];
96 s = splimp();
c029fe44 97 IN;
fd78e9f6 98#ifdef KMEMSTATS
0a4aff4d 99 while (ksp->ks_memuse >= ksp->ks_limit) {
fd78e9f6 100 if (flags & M_NOWAIT) {
c029fe44 101 OUT;
fd78e9f6 102 splx(s);
44dd1cfa 103 return ((void *) NULL);
fd78e9f6
KM
104 }
105 if (ksp->ks_limblocks < 65535)
106 ksp->ks_limblocks++;
c029fe44 107 OUT;
79ce1cef 108 tsleep((caddr_t)ksp, PSWP+2, memname[type], 0);
c029fe44 109 IN;
fd78e9f6 110 }
a211e37d
KM
111#endif
112#ifdef DIAGNOSTIC
f53f530d 113 copysize = 1 << indx < MAX_COPY ? 1 << indx : MAX_COPY;
fd78e9f6 114#endif
d4202556
KM
115 if (kbp->kb_next == NULL) {
116 if (size > MAXALLOCSAVE)
117 allocsize = roundup(size, CLBYTES);
118 else
119 allocsize = 1 << indx;
120 npg = clrnd(btoc(allocsize));
9d4095a1
KM
121 va = (caddr_t) kmem_malloc(kmem_map, (vm_size_t)ctob(npg),
122 !(flags & M_NOWAIT));
123 if (va == NULL) {
c029fe44 124 OUT;
d4202556 125 splx(s);
44dd1cfa 126 return ((void *) NULL);
d4202556 127 }
d4202556
KM
128#ifdef KMEMSTATS
129 kbp->kb_total += kbp->kb_elmpercl;
130#endif
131 kup = btokup(va);
132 kup->ku_indx = indx;
133 if (allocsize > MAXALLOCSAVE) {
134 if (npg > 65535)
135 panic("malloc: allocation too large");
136 kup->ku_pagecnt = npg;
fd78e9f6
KM
137#ifdef KMEMSTATS
138 ksp->ks_memuse += allocsize;
139#endif
d4202556
KM
140 goto out;
141 }
142#ifdef KMEMSTATS
143 kup->ku_freecnt = kbp->kb_elmpercl;
144 kbp->kb_totalfree += kbp->kb_elmpercl;
145#endif
fa3523f0
MK
146 /*
147 * Just in case we blocked while allocating memory,
148 * and someone else also allocated memory for this
149 * bucket, don't assume the list is still empty.
150 */
151 savedlist = kbp->kb_next;
ebde2224 152 rp = kbp->kb_next; /* returned while blocked in vmemall */
c029fe44 153 for (cp = kbp->kb_next; cp >= va; cp -= allocsize) {
ebde2224 154 ((caddr_t *)cp)[2] = (cp > va ? cp - allocsize : rp);
c029fe44
KS
155 if (indx == 7) {
156 long *lp = (long *)cp;
157 lp[0] = lp[1] = lp[3] = lp[4] = -1;
158 }
159 }
d4202556
KM
160 }
161 va = kbp->kb_next;
c029fe44
KS
162 kbp->kb_next = ((caddr_t *)va)[2];
163 if (indx == 7) {
164 long *lp = (long *)va;
165 if (lp[0] != -1 || lp[1] != -1 || lp[3] != -1 || lp[4] != -1)
166 panic("malloc meddled");
167 }
d4202556
KM
168#ifdef KMEMSTATS
169 kup = btokup(va);
170 if (kup->ku_indx != indx)
171 panic("malloc: wrong bucket");
172 if (kup->ku_freecnt == 0)
173 panic("malloc: lost data");
174 kup->ku_freecnt--;
175 kbp->kb_totalfree--;
fd78e9f6 176 ksp->ks_memuse += 1 << indx;
d4202556
KM
177out:
178 kbp->kb_calls++;
179 ksp->ks_inuse++;
180 ksp->ks_calls++;
0a4aff4d
KM
181 if (ksp->ks_memuse > ksp->ks_maxused)
182 ksp->ks_maxused = ksp->ks_memuse;
d4202556
KM
183#else
184out:
185#endif
de3e217b
KM
186 if (size > 64 && size <= 128) {
187 mlp = (struct uselist *)malloc(sizeof(*mlp), M_TEMP, M_WAITOK);
188 mlp->type = type;
189 mlp->size = size;
190 mlp->mem = va;
191 mlp->next = listhd;
192 listhd = mlp;
193 }
c029fe44 194 OUT;
d4202556 195 splx(s);
44dd1cfa 196 return ((void *) va);
d4202556
KM
197}
198
/*
 * NOTE(review): a second, identical definition of addrmask[] appeared
 * here under #ifdef DIAGNOSTIC.  It duplicated the definition near the
 * top of the file and would not compile (symbol redefinition); removed.
 */
207
d4202556
KM
208/*
209 * Free a block of memory allocated by malloc.
210 */
738ba0d6
KM
211void
212free(addr, type)
44dd1cfa 213 void *addr;
47516941 214 int type;
d4202556
KM
215{
216 register struct kmembuckets *kbp;
217 register struct kmemusage *kup;
05138c22 218 register struct freelist *freep;
a211e37d 219 long size;
47516941 220 int s;
a211e37d
KM
221#ifdef DIAGNOSTIC
222 caddr_t cp;
f53f530d 223 long *end, *lp, alloc, copysize;
a211e37d 224#endif
fd78e9f6
KM
225#ifdef KMEMSTATS
226 register struct kmemstats *ksp = &kmemstats[type];
227#endif
d4202556
KM
228
229 kup = btokup(addr);
71029b66 230 size = 1 << kup->ku_indx;
ebde2224
KS
231#ifdef DIAGNOSTIC
232 if (size > NBPG * CLSIZE)
233 alloc = addrmask[BUCKETINDX(NBPG * CLSIZE)];
234 else
235 alloc = addrmask[kup->ku_indx];
236 if (((u_long)addr & alloc) != 0) {
237 printf("free: unaligned addr 0x%x, size %d, type %d, mask %d\n",
238 addr, size, type, alloc);
239 panic("free: unaligned addr");
240 }
241#endif /* DIAGNOSTIC */
242 size = 1 << kup->ku_indx;
a211e37d
KM
243 kbp = &bucket[kup->ku_indx];
244 s = splimp();
de3e217b
KM
245 if (size == 128) {
246 struct uselist *mlp, *pmlp;
247
248 mlp = listhd;
249 if (mlp->mem == addr)
250 listhd = mlp->next;
251 else for (pmlp = mlp, mlp = mlp->next ; mlp; mlp = mlp->next) {
252 if (mlp->mem == addr) {
253 pmlp->next = mlp->next;
254 break;
255 }
256 pmlp = mlp;
257 }
258 if (mlp == NULL)
259 printf("free: lost type %s size %d\n", memname[type],
260 size);
261 else
262 free(mlp, M_TEMP);
263 }
71029b66 264#ifdef DIAGNOSTIC
a211e37d
KM
265 /*
266 * Check for returns of data that do not point to the
267 * beginning of the allocation.
268 */
b8c8091e
KM
269 if (type == M_NAMEI)
270 curproc->p_spare[0]--;
71029b66
KM
271 if (size > NBPG * CLSIZE)
272 alloc = addrmask[BUCKETINDX(NBPG * CLSIZE)];
273 else
274 alloc = addrmask[kup->ku_indx];
03bdab7f
KM
275 if (((u_long)addr & alloc) != 0)
276 panic("free: unaligned addr 0x%x, size %d, type %s, mask %d\n",
277 addr, size, memname[type], alloc);
71029b66 278#endif /* DIAGNOSTIC */
c029fe44 279 IN;
0a4aff4d 280 if (size > MAXALLOCSAVE) {
9d4095a1 281 kmem_free(kmem_map, (vm_offset_t)addr, ctob(kup->ku_pagecnt));
d4202556 282#ifdef KMEMSTATS
0a4aff4d
KM
283 size = kup->ku_pagecnt << PGSHIFT;
284 ksp->ks_memuse -= size;
d4202556
KM
285 kup->ku_indx = 0;
286 kup->ku_pagecnt = 0;
0a4aff4d
KM
287 if (ksp->ks_memuse + size >= ksp->ks_limit &&
288 ksp->ks_memuse < ksp->ks_limit)
fd78e9f6
KM
289 wakeup((caddr_t)ksp);
290 ksp->ks_inuse--;
738ba0d6 291 kbp->kb_total -= 1;
d4202556
KM
292#endif
293 splx(s);
294 return;
295 }
05138c22 296 freep = (struct freelist *)addr;
a211e37d
KM
297#ifdef DIAGNOSTIC
298 /*
299 * Check for multiple frees. Use a quick check to see if
300 * it looks free before laboriously searching the freelist.
301 */
05138c22 302 if (freep->spare0 == WEIRD_ADDR) {
f53f530d
KM
303 for (cp = kbp->kb_next; cp; cp = *(caddr_t *)cp) {
304 if (addr != cp)
305 continue;
306 printf("multiply freed item 0x%x\n", addr);
307 panic("free: duplicated free");
a211e37d
KM
308 }
309 }
310 /*
311 * Copy in known text to detect modification after freeing
05138c22
KM
312 * and to make it look free. Also, save the type being freed
313 * so we can list likely culprit if modification is detected
314 * when the object is reallocated.
a211e37d 315 */
f53f530d
KM
316 copysize = size < MAX_COPY ? size : MAX_COPY;
317 end = (long *)&((caddr_t)addr)[copysize];
318 for (lp = (long *)addr; lp < end; lp++)
319 *lp = WEIRD_ADDR;
05138c22 320 freep->type = type;
a211e37d 321#endif /* DIAGNOSTIC */
c029fe44
KS
322 if (size == 128) {
323 long *lp = (long *)addr;
324 lp[0] = lp[1] = lp[3] = lp[4] = -1;
325 }
d4202556
KM
326#ifdef KMEMSTATS
327 kup->ku_freecnt++;
328 if (kup->ku_freecnt >= kbp->kb_elmpercl)
329 if (kup->ku_freecnt > kbp->kb_elmpercl)
330 panic("free: multiple frees");
331 else if (kbp->kb_totalfree > kbp->kb_highwat)
332 kbp->kb_couldfree++;
333 kbp->kb_totalfree++;
0a4aff4d
KM
334 ksp->ks_memuse -= size;
335 if (ksp->ks_memuse + size >= ksp->ks_limit &&
336 ksp->ks_memuse < ksp->ks_limit)
fd78e9f6
KM
337 wakeup((caddr_t)ksp);
338 ksp->ks_inuse--;
d4202556 339#endif
c029fe44 340 ((caddr_t *)addr)[2] = kbp->kb_next;
d4202556 341 kbp->kb_next = addr;
c029fe44 342 OUT;
d4202556
KM
343 splx(s);
344}
345
346/*
347 * Initialize the kernel memory allocator
348 */
349kmeminit()
350{
351 register long indx;
738ba0d6 352 int npg;
d4202556 353
47516941
MK
354#if ((MAXALLOCSAVE & (MAXALLOCSAVE - 1)) != 0)
355 ERROR!_kmeminit:_MAXALLOCSAVE_not_power_of_2
356#endif
357#if (MAXALLOCSAVE > MINALLOCSIZE * 32768)
358 ERROR!_kmeminit:_MAXALLOCSAVE_too_big
359#endif
360#if (MAXALLOCSAVE < CLBYTES)
361 ERROR!_kmeminit:_MAXALLOCSAVE_too_small
362#endif
9d4095a1
KM
363 npg = VM_KMEM_SIZE/ NBPG;
364 kmemusage = (struct kmemusage *) kmem_alloc(kernel_map,
365 (vm_size_t)(npg * sizeof(struct kmemusage)));
8fd6a191
CT
366 kmem_map = kmem_suballoc(kernel_map, (vm_offset_t *)&kmembase,
367 (vm_offset_t *)&kmemlimit, (vm_size_t)(npg * NBPG), FALSE);
d4202556
KM
368#ifdef KMEMSTATS
369 for (indx = 0; indx < MINBUCKET + 16; indx++) {
370 if (1 << indx >= CLBYTES)
371 bucket[indx].kb_elmpercl = 1;
372 else
373 bucket[indx].kb_elmpercl = CLBYTES / (1 << indx);
374 bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
375 }
376 for (indx = 0; indx < M_LAST; indx++)
5aeb6f6a 377 kmemstats[indx].ks_limit = npg * NBPG * 6 / 10;
d4202556
KM
378#endif
379}