/*
 * Copyright (c) 1987, 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)kern_malloc.c	7.28 (Berkeley) %G%
 */

#include "param.h"
#include "proc.h"
#include "map.h"
#include "kernel.h"
#include "malloc.h"
#include "vm/vm.h"
#include "vm/vm_kern.h"

struct kmembuckets bucket[MINBUCKET + 16];
struct kmemstats kmemstats[M_LAST];
struct kmemusage *kmemusage;
char *kmembase, *kmemlimit;
char *memname[] = INITKMEMNAMES;
long malloc_reentered;
#define IN { if (malloc_reentered) panic("malloc reentered"); \
		else malloc_reentered = 1; }
#define OUT (malloc_reentered = 0)
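
/*
 * IN/OUT bracket the allocator's critical sections purely as a debugging
 * aid: splimp() in malloc() and free() should already keep interrupt-level
 * callers out, so the "malloc reentered" panic points at a caller that got
 * in from a context the spl protection does not cover.
 */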

#ifdef DIAGNOSTIC
/*
 * This structure serves two purposes.
 * The first is to provide a set of masks to catch unaligned frees.
 * The second is to provide known text to copy into free objects so
 * that modifications after frees can be detected.
 */
#define WEIRD_ADDR	0xdeadbeef
long addrmask[] = { WEIRD_ADDR,
	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
};
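
/*
 * Example (hypothetical sizes): an object handed out from the 64-byte
 * bucket lies on a 64-byte boundary, so free() checks
 * "addr & addrmask[indx]" with addrmask[indx] == 0x3f; any nonzero low
 * bits mean the caller is freeing a pointer into the middle of an object.
 */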

/*
 * Normally the first word of the structure is used to hold the list
 * pointer for free objects. However, when running with diagnostics,
 * we use the third and fourth fields, so as to catch modifications
 * in the most commonly trashed first two words.
 */
struct freelist {
	long	spare0;
	long	spare1;
	short	type;
	short	spare2;
	caddr_t	next;
};
#else /* !DIAGNOSTIC */
struct freelist {
	caddr_t	next;
};
#endif /* DIAGNOSTIC */
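
/*
 * How free() uses this layout under DIAGNOSTIC (sketch):
 *
 *	freep = (struct freelist *)addr;
 *	freep->spare0	overwritten with WEIRD_ADDR, checked on the next free
 *	freep->type	records the malloc type passed to free(), so a later
 *			corruption can be blamed on a likely culprit
 */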

/*
 * Allocate a block of memory
 */
void *
malloc(size, type, flags)
	unsigned long size;
	int type, flags;
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	register struct freelist *freep;
	long indx, npg, alloc, allocsize;
	int s;
	caddr_t va, cp, rp;
#ifdef DIAGNOSTIC
	long copysize;
#endif
#ifdef KMEMSTATS
	register struct kmemstats *ksp = &kmemstats[type];
#endif

#ifdef DIAGNOSTIC
	if (((unsigned long)type) > M_LAST)
		panic("malloc - bogus type");
	if (type == M_NAMEI)
		curproc->p_spare[0]++;
#endif

	indx = BUCKETINDX(size);
	kbp = &bucket[indx];
	s = splimp();
	IN;
#ifdef KMEMSTATS
	while (ksp->ks_memuse >= ksp->ks_limit) {
		if (flags & M_NOWAIT) {
			OUT;
			splx(s);
			return ((void *) NULL);
		}
		if (ksp->ks_limblocks < 65535)
			ksp->ks_limblocks++;
		OUT;
		tsleep((caddr_t)ksp, PSWP+2, memname[type], 0);
		IN;
	}
#endif
#ifdef DIAGNOSTIC
	copysize = 1 << indx < sizeof addrmask ? 1 << indx : sizeof addrmask;
#endif
	if (kbp->kb_next == NULL) {
		if (size > MAXALLOCSAVE)
			allocsize = roundup(size, CLBYTES);
		else
			allocsize = 1 << indx;
		npg = clrnd(btoc(allocsize));
		va = (caddr_t) kmem_malloc(kmem_map, (vm_size_t)ctob(npg),
					   !(flags & M_NOWAIT));
		if (va == NULL) {
			OUT;
			splx(s);
			return ((void *) NULL);
		}
#ifdef KMEMSTATS
		kbp->kb_total += kbp->kb_elmpercl;
#endif
		kup = btokup(va);
		kup->ku_indx = indx;
		if (allocsize > MAXALLOCSAVE) {
			if (npg > 65535)
				panic("malloc: allocation too large");
			kup->ku_pagecnt = npg;
#ifdef KMEMSTATS
			ksp->ks_memuse += allocsize;
#endif
			goto out;
		}
#ifdef KMEMSTATS
		kup->ku_freecnt = kbp->kb_elmpercl;
		kbp->kb_totalfree += kbp->kb_elmpercl;
#endif
		/*
		 * Just in case we blocked while allocating memory,
		 * and someone else also allocated memory for this
		 * bucket, don't assume the list is still empty.
		 */
		rp = kbp->kb_next;	/* returned while blocked in vmemall */
		kbp->kb_next = va + (npg * NBPG) - allocsize;
		for (cp = kbp->kb_next; cp >= va; cp -= allocsize) {
			((caddr_t *)cp)[2] = (cp > va ? cp - allocsize : rp);
			if (indx == 7) {
				long *lp = (long *)cp;
				lp[0] = lp[1] = lp[3] = lp[4] = -1;
			}
		}
	}
	va = kbp->kb_next;
	kbp->kb_next = ((caddr_t *)va)[2];
	if (indx == 7) {
		long *lp = (long *)va;
		if (lp[0] != -1 || lp[1] != -1 || lp[3] != -1 || lp[4] != -1)
			panic("malloc meddled");
	}
#ifdef KMEMSTATS
	kup = btokup(va);
	if (kup->ku_indx != indx)
		panic("malloc: wrong bucket");
	if (kup->ku_freecnt == 0)
		panic("malloc: lost data");
	kup->ku_freecnt--;
	kbp->kb_totalfree--;
	ksp->ks_memuse += 1 << indx;
out:
	kbp->kb_calls++;
	ksp->ks_inuse++;
	ksp->ks_calls++;
	if (ksp->ks_memuse > ksp->ks_maxused)
		ksp->ks_maxused = ksp->ks_memuse;
#else
out:
#endif
	OUT;
	splx(s);
	return ((void *) va);
}
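
/*
 * Typical use (sketch; M_TEMP is just an example type from malloc.h):
 *
 *	register caddr_t cp;
 *
 *	cp = (caddr_t)malloc(NBPG, M_TEMP, M_WAITOK);
 *	...
 *	free((void *)cp, M_TEMP);
 *
 * A caller that may not sleep passes M_NOWAIT instead and must handle a
 * NULL return, which happens when the type is over its ks_limit or
 * kmem_map has no pages available.
 */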
/*
 * Free a block of memory allocated by malloc.
 */
void
free(addr, type)
	void *addr;
	int type;
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	register struct freelist *freep;
	long size;
	int s;
#ifdef DIAGNOSTIC
	caddr_t cp;
	long alloc, copysize;
#endif
#ifdef KMEMSTATS
	register struct kmemstats *ksp = &kmemstats[type];
#endif

	kup = btokup(addr);
	size = 1 << kup->ku_indx;
	kbp = &bucket[kup->ku_indx];
	s = splimp();
#ifdef DIAGNOSTIC
	/*
	 * Check for returns of data that do not point to the
	 * beginning of the allocation.
	 */
	if (type == M_NAMEI)
		curproc->p_spare[0]--;
	if (size > NBPG * CLSIZE)
		alloc = addrmask[BUCKETINDX(NBPG * CLSIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((u_long)addr & alloc) != 0) {
		printf("free: unaligned addr 0x%x, size %d, type %d, mask %d\n",
		    addr, size, type, alloc);
		panic("free: unaligned addr");
	}
#endif /* DIAGNOSTIC */
	IN;
	if (size > MAXALLOCSAVE) {
		kmem_free(kmem_map, (vm_offset_t)addr, ctob(kup->ku_pagecnt));
#ifdef KMEMSTATS
		size = kup->ku_pagecnt << PGSHIFT;
		ksp->ks_memuse -= size;
		kup->ku_indx = 0;
		kup->ku_pagecnt = 0;
		if (ksp->ks_memuse + size >= ksp->ks_limit &&
		    ksp->ks_memuse < ksp->ks_limit)
			wakeup((caddr_t)ksp);
		ksp->ks_inuse--;
		kbp->kb_total -= 1;
#endif
		OUT;
		splx(s);
		return;
	}
	freep = (struct freelist *)addr;
#ifdef DIAGNOSTIC
	/*
	 * Check for multiple frees. Use a quick check to see if
	 * it looks free before laboriously searching the freelist.
	 */
	copysize = size < sizeof addrmask ? size : sizeof addrmask;
	if (freep->spare0 == WEIRD_ADDR) {
		freep->type = ((struct freelist *)addrmask)->type;
		freep->next = ((struct freelist *)addrmask)->next;
		if (!bcmp(addrmask, addr, copysize)) {
			for (cp = kbp->kb_next; cp; cp = *(caddr_t *)cp) {
				if (addr == cp) {
					printf("multiply freed item 0x%x\n",
					    addr);
					panic("free: duplicated free");
				}
			}
		}
	}
	/*
	 * Copy in known text to detect modification after freeing
	 * and to make it look free. Also, save the type being freed
	 * so we can list likely culprit if modification is detected
	 * when the object is reallocated.
	 */
	bcopy(addrmask, addr, copysize);
	freep->type = type;
#endif /* DIAGNOSTIC */
	if (size == 128) {
		long *lp = (long *)addr;
		lp[0] = lp[1] = lp[3] = lp[4] = -1;
	}
#ifdef KMEMSTATS
	kup->ku_freecnt++;
	if (kup->ku_freecnt >= kbp->kb_elmpercl)
		if (kup->ku_freecnt > kbp->kb_elmpercl)
			panic("free: multiple frees");
		else if (kbp->kb_totalfree > kbp->kb_highwat)
			kbp->kb_couldfree++;
	kbp->kb_totalfree++;
	ksp->ks_memuse -= size;
	if (ksp->ks_memuse + size >= ksp->ks_limit &&
	    ksp->ks_memuse < ksp->ks_limit)
		wakeup((caddr_t)ksp);
	ksp->ks_inuse--;
#endif
	((caddr_t *)addr)[2] = kbp->kb_next;
	kbp->kb_next = addr;
	OUT;
	splx(s);
}
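
/*
 * Sketch of what the DIAGNOSTIC checks in free() catch: the alignment test
 * rejects a pointer into the middle of an allocation, e.g.
 *
 *	cp = (caddr_t)malloc(32, M_TEMP, M_WAITOK);
 *	free((void *)(cp + 4), M_TEMP);		panics "free: unaligned addr"
 *
 * while the addrmask pattern copied over freed objects is meant to make a
 * second free(), or a store through a stale pointer, visible the next time
 * the object is examined or handed out again.
 */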

/*
 * Initialize the kernel memory allocator
 */
kmeminit()
{
	register long indx;
	int npg;

#if	((MAXALLOCSAVE & (MAXALLOCSAVE - 1)) != 0)
		ERROR!_kmeminit:_MAXALLOCSAVE_not_power_of_2
#endif
#if	(MAXALLOCSAVE > MINALLOCSIZE * 32768)
		ERROR!_kmeminit:_MAXALLOCSAVE_too_big
#endif
#if	(MAXALLOCSAVE < CLBYTES)
		ERROR!_kmeminit:_MAXALLOCSAVE_too_small
#endif
	npg = VM_KMEM_SIZE / NBPG;
	kmemusage = (struct kmemusage *) kmem_alloc(kernel_map,
		(vm_size_t)(npg * sizeof(struct kmemusage)));
	kmem_map = kmem_suballoc(kernel_map, (vm_offset_t)&kmembase,
		(vm_offset_t)&kmemlimit, (vm_size_t)(npg * NBPG), FALSE);
#ifdef KMEMSTATS
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		if (1 << indx >= CLBYTES)
			bucket[indx].kb_elmpercl = 1;
		else
			bucket[indx].kb_elmpercl = CLBYTES / (1 << indx);
		bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
	}
	for (indx = 0; indx < M_LAST; indx++)
		kmemstats[indx].ks_limit = npg * NBPG * 6 / 10;
#endif
}
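
/*
 * Worked example of the limits set above (hypothetical configuration):
 * with VM_KMEM_SIZE of 1 megabyte (1048576 bytes) and NBPG of 4096, npg is
 * 256, so each malloc type gets ks_limit = 256 * 4096 * 6 / 10 = 629145
 * bytes, i.e. 60% of the kmem_map.  With CLBYTES of 4096, the 64-byte
 * bucket gets kb_elmpercl = 4096 / 64 = 64 elements per cluster and
 * kb_highwat = 5 * 64 = 320.
 */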