delete hardware speed, add machine byte order
[unix-history] / usr / src / sys / kern / kern_malloc.c
... / ...
CommitLineData
1/*
2 * Copyright (c) 1987, 1991 The Regents of the University of California.
3 * All rights reserved.
4 *
5 * %sccs.include.redist.c%
6 *
7 * @(#)kern_malloc.c 7.38 (Berkeley) %G%
8 */
9
10#include <sys/param.h>
11#include <sys/proc.h>
12#include <sys/map.h>
13#include <sys/kernel.h>
14#include <sys/malloc.h>
15
16#include <vm/vm.h>
17#include <vm/vm_kern.h>
18
19struct kmembuckets bucket[MINBUCKET + 16];
20struct kmemstats kmemstats[M_LAST];
21struct kmemusage *kmemusage;
22char *memname[] = INITKMEMNAMES;
23char *kmembase, *kmemlimit;
24char *memname[] = INITKMEMNAMES;
25long malloc_reentered;
26#define IN { if (malloc_reentered) panic("malloc reentered");\
27 else malloc_reentered = 1;}
28#define OUT (malloc_reentered = 0)
29
#ifdef DIAGNOSTIC
/*
 * This structure provides a set of masks to catch unaligned frees.
 * addrmask[i] has the i low-order bits set, so (addr & addrmask[indx])
 * is nonzero for any address not aligned to its bucket's element size.
 */
long addrmask[] = { 0,
	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
};

/*
 * The WEIRD_ADDR is used as known text to copy into free objects so
 * that modifications after frees can be detected.
 */
#define WEIRD_ADDR 0xdeadbeef
#define MAX_COPY 32	/* stamp at most this many bytes of a freed object */

/*
 * Normally the first word of the structure is used to hold the list
 * pointer for free objects. However, when running with diagnostics,
 * we use the third and fourth fields, so as to catch modifications
 * in the most commonly trashed first two words.
 */
struct freelist {
	long spare0;	/* holds WEIRD_ADDR while the object is free */
	short type;	/* type of the last owner, to name a culprit */
	long spare1;
	caddr_t next;	/* next free object in this bucket */
};
#else /* !DIAGNOSTIC */
struct freelist {
	caddr_t next;	/* next free object in this bucket */
};
#endif /* DIAGNOSTIC */
65
/*
 * Debugging record kept for every live allocation of more than 64 and
 * at most 128 bytes (see malloc/free); singly linked from listhd.
 */
struct uselist {
	struct uselist *next;	/* next tracking record */
	caddr_t mem;		/* address returned to the caller */
	long size;		/* requested allocation size */
	long type;		/* M_* type of the allocation */
} *listhd;
72
73/*
74 * Allocate a block of memory
75 */
76void *
77malloc(size, type, flags)
78 unsigned long size;
79 int type, flags;
80{
81 register struct kmembuckets *kbp;
82 register struct kmemusage *kup;
83 register struct freelist *freep;
84 long indx, npg, alloc, allocsize;
85 int s;
86 caddr_t va, cp, rp;
87#ifdef KMEMSTATS
88 register struct kmemstats *ksp = &kmemstats[type];
89
90#ifdef DIAGNOSTIC
91 if (((unsigned long)type) > M_LAST)
92 panic("malloc - bogus type");
93 if (type == M_NAMEI)
94 curproc->p_spare[0]++;
95 indx = BUCKETINDX(size);
96 kbp = &bucket[indx];
97 s = splimp();
98 IN;
99#ifdef KMEMSTATS
100 while (ksp->ks_memuse >= ksp->ks_limit) {
101 if (flags & M_NOWAIT) {
102 OUT;
103 splx(s);
104 return ((void *) NULL);
105 }
106 if (ksp->ks_limblocks < 65535)
107 ksp->ks_limblocks++;
108 OUT;
109 tsleep((caddr_t)ksp, PSWP+2, memname[type], 0);
110 IN;
111 }
112 ksp->ks_size |= 1 << indx;
113#endif
114#ifdef DIAGNOSTIC
115 copysize = 1 << indx < MAX_COPY ? 1 << indx : MAX_COPY;
116#endif
117 if (kbp->kb_next == NULL) {
118 kbp->kb_last = NULL;
119 if (size > MAXALLOCSAVE)
120 allocsize = roundup(size, CLBYTES);
121 else
122 allocsize = 1 << indx;
123 npg = clrnd(btoc(allocsize));
124 va = (caddr_t) kmem_malloc(kmem_map, (vm_size_t)ctob(npg),
125 !(flags & M_NOWAIT));
126 if (va == NULL) {
127 OUT;
128 splx(s);
129 return ((void *) NULL);
130 }
131#ifdef KMEMSTATS
132 kbp->kb_total += kbp->kb_elmpercl;
133#endif
134 kup = btokup(va);
135 kup->ku_indx = indx;
136 if (allocsize > MAXALLOCSAVE) {
137 if (npg > 65535)
138 panic("malloc: allocation too large");
139 kup->ku_pagecnt = npg;
140#ifdef KMEMSTATS
141 ksp->ks_memuse += allocsize;
142#endif
143 goto out;
144 }
145#ifdef KMEMSTATS
146 kup->ku_freecnt = kbp->kb_elmpercl;
147 kbp->kb_totalfree += kbp->kb_elmpercl;
148#endif
149 /*
150 * Just in case we blocked while allocating memory,
151 * and someone else also allocated memory for this
152 * bucket, don't assume the list is still empty.
153 */
154 savedlist = kbp->kb_next;
155 rp = kbp->kb_next; /* returned while blocked in vmemall */
156 for (cp = kbp->kb_next; cp >= va; cp -= allocsize) {
157 ((caddr_t *)cp)[2] = (cp > va ? cp - allocsize : rp);
158 if (indx == 7) {
159 long *lp = (long *)cp;
160 lp[0] = lp[1] = lp[3] = lp[4] = -1;
161 }
162 }
163 }
164 va = kbp->kb_next;
165 kbp->kb_next = ((caddr_t *)va)[2];
166 if (indx == 7) {
167 long *lp = (long *)va;
168 if (lp[0] != -1 || lp[1] != -1 || lp[3] != -1 || lp[4] != -1)
169 panic("malloc meddled");
170 }
171#ifdef KMEMSTATS
172 kup = btokup(va);
173 if (kup->ku_indx != indx)
174 panic("malloc: wrong bucket");
175 if (kup->ku_freecnt == 0)
176 panic("malloc: lost data");
177 kup->ku_freecnt--;
178 kbp->kb_totalfree--;
179 ksp->ks_memuse += 1 << indx;
180out:
181 kbp->kb_calls++;
182 ksp->ks_inuse++;
183 ksp->ks_calls++;
184 if (ksp->ks_memuse > ksp->ks_maxused)
185 ksp->ks_maxused = ksp->ks_memuse;
186#else
187out:
188#endif
189 if (size > 64 && size <= 128) {
190 mlp = (struct uselist *)malloc(sizeof(*mlp), M_TEMP, M_WAITOK);
191 mlp->type = type;
192 mlp->size = size;
193 mlp->mem = va;
194 mlp->next = listhd;
195 listhd = mlp;
196 }
197 OUT;
198 splx(s);
199 return ((void *) va);
200}
201
/*
 * The DIAGNOSTIC addrmask table is defined once, near the top of this
 * file; the duplicate definition that stood here was a redefinition
 * error whenever DIAGNOSTIC was enabled, and has been removed.
 */
210
211/*
212 * Free a block of memory allocated by malloc.
213 */
214void
215free(addr, type)
216 void *addr;
217 int type;
218{
219 register struct kmembuckets *kbp;
220 register struct kmemusage *kup;
221 register struct freelist *freep;
222 long size;
223 int s;
224#ifdef DIAGNOSTIC
225 caddr_t cp;
226 long *end, *lp, alloc, copysize;
227#endif
228#ifdef KMEMSTATS
229 register struct kmemstats *ksp = &kmemstats[type];
230#endif
231
232 kup = btokup(addr);
233 size = 1 << kup->ku_indx;
234#ifdef DIAGNOSTIC
235 if (size > NBPG * CLSIZE)
236 alloc = addrmask[BUCKETINDX(NBPG * CLSIZE)];
237 else
238 alloc = addrmask[kup->ku_indx];
239 if (((u_long)addr & alloc) != 0) {
240 printf("free: unaligned addr 0x%x, size %d, type %d, mask %d\n",
241 addr, size, type, alloc);
242 panic("free: unaligned addr");
243 }
244#endif /* DIAGNOSTIC */
245 size = 1 << kup->ku_indx;
246 kbp = &bucket[kup->ku_indx];
247 s = splimp();
248 if (size == 128) {
249 struct uselist *mlp, *pmlp;
250
251 mlp = listhd;
252 if (mlp->mem == addr)
253 listhd = mlp->next;
254 else for (pmlp = mlp, mlp = mlp->next ; mlp; mlp = mlp->next) {
255 if (mlp->mem == addr) {
256 pmlp->next = mlp->next;
257 break;
258 }
259 pmlp = mlp;
260 }
261 if (mlp == NULL)
262 printf("free: lost type %s size %d\n", memname[type],
263 size);
264 else
265 free(mlp, M_TEMP);
266 }
267#ifdef DIAGNOSTIC
268 /*
269 * Check for returns of data that do not point to the
270 * beginning of the allocation.
271 */
272 if (type == M_NAMEI)
273 curproc->p_spare[0]--;
274 if (size > NBPG * CLSIZE)
275 alloc = addrmask[BUCKETINDX(NBPG * CLSIZE)];
276 else
277 alloc = addrmask[kup->ku_indx];
278 if (((u_long)addr & alloc) != 0)
279 panic("free: unaligned addr 0x%x, size %d, type %s, mask %d\n",
280 addr, size, memname[type], alloc);
281#endif /* DIAGNOSTIC */
282 IN;
283 if (size > MAXALLOCSAVE) {
284 kmem_free(kmem_map, (vm_offset_t)addr, ctob(kup->ku_pagecnt));
285#ifdef KMEMSTATS
286 size = kup->ku_pagecnt << PGSHIFT;
287 ksp->ks_memuse -= size;
288 kup->ku_indx = 0;
289 kup->ku_pagecnt = 0;
290 if (ksp->ks_memuse + size >= ksp->ks_limit &&
291 ksp->ks_memuse < ksp->ks_limit)
292 wakeup((caddr_t)ksp);
293 ksp->ks_inuse--;
294 kbp->kb_total -= 1;
295#endif
296 splx(s);
297 return;
298 }
299 freep = (struct freelist *)addr;
300#ifdef DIAGNOSTIC
301 /*
302 * Check for multiple frees. Use a quick check to see if
303 * it looks free before laboriously searching the freelist.
304 */
305 if (freep->spare0 == WEIRD_ADDR) {
306 for (cp = kbp->kb_next; cp; cp = *(caddr_t *)cp) {
307 if (addr != cp)
308 continue;
309 printf("multiply freed item 0x%x\n", addr);
310 panic("free: duplicated free");
311 }
312 }
313 /*
314 * Copy in known text to detect modification after freeing
315 * and to make it look free. Also, save the type being freed
316 * so we can list likely culprit if modification is detected
317 * when the object is reallocated.
318 */
319 copysize = size < MAX_COPY ? size : MAX_COPY;
320 end = (long *)&((caddr_t)addr)[copysize];
321 for (lp = (long *)addr; lp < end; lp++)
322 *lp = WEIRD_ADDR;
323 freep->type = type;
324#endif /* DIAGNOSTIC */
325 if (size == 128) {
326 long *lp = (long *)addr;
327 lp[0] = lp[1] = lp[3] = lp[4] = -1;
328 }
329#ifdef KMEMSTATS
330 kup->ku_freecnt++;
331 if (kup->ku_freecnt >= kbp->kb_elmpercl)
332 if (kup->ku_freecnt > kbp->kb_elmpercl)
333 panic("free: multiple frees");
334 else if (kbp->kb_totalfree > kbp->kb_highwat)
335 kbp->kb_couldfree++;
336 kbp->kb_totalfree++;
337 ksp->ks_memuse -= size;
338 if (ksp->ks_memuse + size >= ksp->ks_limit &&
339 ksp->ks_memuse < ksp->ks_limit)
340 wakeup((caddr_t)ksp);
341 ksp->ks_inuse--;
342#endif
343 if (kbp->kb_next == NULL)
344 kbp->kb_next = addr;
345 else
346 ((struct freelist *)kbp->kb_last)->next = addr;
347 freep->next = NULL;
348 kbp->kb_last = addr;
349 OUT;
350 splx(s);
351}
352
353/*
354 * Initialize the kernel memory allocator
355 */
356kmeminit()
357{
358 register long indx;
359 int npg;
360
361#if ((MAXALLOCSAVE & (MAXALLOCSAVE - 1)) != 0)
362 ERROR!_kmeminit:_MAXALLOCSAVE_not_power_of_2
363#endif
364#if (MAXALLOCSAVE > MINALLOCSIZE * 32768)
365 ERROR!_kmeminit:_MAXALLOCSAVE_too_big
366#endif
367#if (MAXALLOCSAVE < CLBYTES)
368 ERROR!_kmeminit:_MAXALLOCSAVE_too_small
369#endif
370 npg = VM_KMEM_SIZE/ NBPG;
371 kmemusage = (struct kmemusage *) kmem_alloc(kernel_map,
372 (vm_size_t)(npg * sizeof(struct kmemusage)));
373 kmem_map = kmem_suballoc(kernel_map, (vm_offset_t *)&kmembase,
374 (vm_offset_t *)&kmemlimit, (vm_size_t)(npg * NBPG), FALSE);
375#ifdef KMEMSTATS
376 for (indx = 0; indx < MINBUCKET + 16; indx++) {
377 if (1 << indx >= CLBYTES)
378 bucket[indx].kb_elmpercl = 1;
379 else
380 bucket[indx].kb_elmpercl = CLBYTES / (1 << indx);
381 bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
382 }
383 for (indx = 0; indx < M_LAST; indx++)
384 kmemstats[indx].ks_limit = npg * NBPG * 6 / 10;
385#endif
386}