Commit | Line | Data |
---|---|---|
15637ed4 RG |
1 | /* |
2 | * Copyright (c) 1987, 1991 The Regents of the University of California. | |
3 | * All rights reserved. | |
4 | * | |
5 | * Redistribution and use in source and binary forms, with or without | |
6 | * modification, are permitted provided that the following conditions | |
7 | * are met: | |
8 | * 1. Redistributions of source code must retain the above copyright | |
9 | * notice, this list of conditions and the following disclaimer. | |
10 | * 2. Redistributions in binary form must reproduce the above copyright | |
11 | * notice, this list of conditions and the following disclaimer in the | |
12 | * documentation and/or other materials provided with the distribution. | |
13 | * 3. All advertising materials mentioning features or use of this software | |
14 | * must display the following acknowledgement: | |
15 | * This product includes software developed by the University of | |
16 | * California, Berkeley and its contributors. | |
17 | * 4. Neither the name of the University nor the names of its contributors | |
18 | * may be used to endorse or promote products derived from this software | |
19 | * without specific prior written permission. | |
20 | * | |
21 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND | |
22 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |
23 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | |
24 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | |
25 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | |
26 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | |
27 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | |
28 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | |
29 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | |
30 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | |
31 | * SUCH DAMAGE. | |
32 | * | |
600f7f07 | 33 | * from: @(#)kern_malloc.c 7.25 (Berkeley) 5/8/91 |
e8bbcc37 | 34 | * $Id: kern_malloc.c,v 1.8 1994/02/10 08:04:07 davidg Exp $ |
15637ed4 RG |
35 | */ |
36 | ||
37 | #include "param.h" | |
4c45483e | 38 | #include "systm.h" |
15637ed4 RG |
39 | #include "proc.h" |
40 | #include "kernel.h" | |
41 | #include "malloc.h" | |
42 | #include "vm/vm.h" | |
43 | #include "vm/vm_kern.h" | |
44 | ||
/* Total count of physical pages, maintained by the VM system. */
extern int vm_page_count;

/* Free-list buckets, one per power-of-2 size from MINBUCKET up. */
struct kmembuckets bucket[MINBUCKET + 16];
/* Per-type (M_*) allocation statistics, indexed by malloc type. */
struct kmemstats kmemstats[M_LAST];
/* One usage record per page cluster of the kmem submap (see btokup()). */
struct kmemusage *kmemusage;
/* Virtual-address bounds of the kernel malloc arena (kmem submap). */
char *kmembase, *kmemlimit;
/* Printable names for the M_* types (used for tsleep wmesg / vmstat). */
char *memname[] = INITKMEMNAMES;
/*
 * Allocate a block of memory.
 *
 * size  - bytes requested; rounded up to a power-of-2 bucket size,
 *         or to whole page clusters when size > MAXALLOCSAVE.
 * type  - M_* allocation type, used for per-type statistics/limits
 *         when KMEMSTATS is defined.
 * flags - M_NOWAIT to return NULL rather than sleep when over limit
 *         or out of kernel VM.
 *
 * Returns a kernel virtual address, or NULL (only possible with
 * M_NOWAIT).  Runs at splimp() to exclude interrupt-level callers
 * from racing on the buckets.
 */
void *
malloc(size, type, flags)
	unsigned long size;
	int type, flags;
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	long indx, npg, alloc, allocsize;
	int s;
	caddr_t va, cp, savedlist;
#ifdef KMEMSTATS
	register struct kmemstats *ksp = &kmemstats[type];

	/* Unsigned compare also rejects negative types in one test. */
	if (((unsigned long)type) >= M_LAST)
		panic("malloc - bogus type");
#endif

	indx = BUCKETINDX(size);
	kbp = &bucket[indx];
	s = splimp();
#ifdef KMEMSTATS
	/*
	 * Enforce the per-type memory limit: sleep until a free() of
	 * this type wakes us, or fail immediately for M_NOWAIT.
	 */
	while (ksp->ks_memuse >= ksp->ks_limit) {
		if (flags & M_NOWAIT) {
			splx(s);
			return ((void *) NULL);
		}
		if (ksp->ks_limblocks < 65535)
			ksp->ks_limblocks++;
		tsleep((caddr_t)ksp, PSWP+2, memname[type], 0);
	}
#endif
	if (kbp->kb_next == NULL) {
		/* Bucket is empty: get fresh pages from the kmem submap. */
		if (size > MAXALLOCSAVE)
			allocsize = roundup(size, CLBYTES);
		else
			allocsize = 1 << indx;
		npg = clrnd(btoc(allocsize));
		/* Third arg: canwait — may sleep unless M_NOWAIT given. */
		va = (caddr_t) kmem_malloc(kmem_map, (vm_size_t)ctob(npg),
			!(flags & M_NOWAIT));
		if (va == NULL) {
			splx(s);
			return ((void *) NULL);
		}
#ifdef KMEMSTATS
		kbp->kb_total += kbp->kb_elmpercl;
#endif
		/* Record the bucket index so free() can find it again. */
		kup = btokup(va);
		kup->ku_indx = indx;
		if (allocsize > MAXALLOCSAVE) {
			/* ku_pagecnt must hold npg — guard its range. */
			if (npg > 65535)
				panic("malloc: allocation too large");
			kup->ku_pagecnt = npg;
#ifdef KMEMSTATS
			ksp->ks_memuse += allocsize;
#endif
			/* Oversized blocks bypass the free list entirely. */
			goto out;
		}
#ifdef KMEMSTATS
		kup->ku_freecnt = kbp->kb_elmpercl;
		kbp->kb_totalfree += kbp->kb_elmpercl;
#endif
		/*
		 * Just in case we blocked while allocating memory,
		 * and someone else also allocated memory for this
		 * bucket, don't assume the list is still empty.
		 */
		savedlist = kbp->kb_next;
		/*
		 * Thread the new chunks onto the free list back-to-front:
		 * each chunk's first word points at the chunk before it,
		 * and the lowest chunk links to the saved list.
		 */
		kbp->kb_next = va + (npg * NBPG) - allocsize;
		for (cp = kbp->kb_next; cp > va; cp -= allocsize)
			*(caddr_t *)cp = cp - allocsize;
		*(caddr_t *)cp = savedlist;
	}
	/* Pop the head chunk off the bucket's free list. */
	va = kbp->kb_next;
	kbp->kb_next = *(caddr_t *)va;
#ifdef KMEMSTATS
	kup = btokup(va);
	if (kup->ku_indx != indx)
		panic("malloc: wrong bucket");
	if (kup->ku_freecnt == 0)
		panic("malloc: lost data");
	kup->ku_freecnt--;
	kbp->kb_totalfree--;
	ksp->ks_memuse += 1 << indx;
out:
	kbp->kb_calls++;
	ksp->ks_inuse++;
	ksp->ks_calls++;
	if (ksp->ks_memuse > ksp->ks_maxused)
		ksp->ks_maxused = ksp->ks_memuse;
#else
out:
#endif
	splx(s);
	return ((void *) va);
}
151 | ||
#ifdef DIAGNOSTIC
/*
 * addrmask[i] has the low i bits set.  free() uses it to verify that
 * a freed address is aligned to its bucket's chunk size (chunks are
 * always naturally aligned, up to one page cluster).
 */
long addrmask[] = { 0x00000000,
	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
};
#endif /* DIAGNOSTIC */
160 | ||
161 | /* | |
162 | * Free a block of memory allocated by malloc. | |
163 | */ | |
164 | void | |
165 | free(addr, type) | |
166 | void *addr; | |
167 | int type; | |
168 | { | |
169 | register struct kmembuckets *kbp; | |
170 | register struct kmemusage *kup; | |
171 | long alloc, size; | |
172 | int s; | |
173 | #ifdef KMEMSTATS | |
174 | register struct kmemstats *ksp = &kmemstats[type]; | |
175 | #endif | |
176 | ||
177 | kup = btokup(addr); | |
178 | size = 1 << kup->ku_indx; | |
179 | #ifdef DIAGNOSTIC | |
180 | if (size > NBPG * CLSIZE) | |
181 | alloc = addrmask[BUCKETINDX(NBPG * CLSIZE)]; | |
182 | else | |
183 | alloc = addrmask[kup->ku_indx]; | |
184 | if (((u_long)addr & alloc) != 0) { | |
185 | printf("free: unaligned addr 0x%x, size %d, type %d, mask %d\n", | |
186 | addr, size, type, alloc); | |
187 | panic("free: unaligned addr"); | |
188 | } | |
189 | #endif /* DIAGNOSTIC */ | |
190 | kbp = &bucket[kup->ku_indx]; | |
191 | s = splimp(); | |
192 | if (size > MAXALLOCSAVE) { | |
193 | kmem_free(kmem_map, (vm_offset_t)addr, ctob(kup->ku_pagecnt)); | |
194 | #ifdef KMEMSTATS | |
195 | size = kup->ku_pagecnt << PGSHIFT; | |
196 | ksp->ks_memuse -= size; | |
197 | kup->ku_indx = 0; | |
198 | kup->ku_pagecnt = 0; | |
199 | if (ksp->ks_memuse + size >= ksp->ks_limit && | |
200 | ksp->ks_memuse < ksp->ks_limit) | |
201 | wakeup((caddr_t)ksp); | |
202 | ksp->ks_inuse--; | |
203 | kbp->kb_total -= 1; | |
204 | #endif | |
205 | splx(s); | |
206 | return; | |
207 | } | |
208 | #ifdef KMEMSTATS | |
209 | kup->ku_freecnt++; | |
210 | if (kup->ku_freecnt >= kbp->kb_elmpercl) | |
211 | if (kup->ku_freecnt > kbp->kb_elmpercl) | |
212 | panic("free: multiple frees"); | |
213 | else if (kbp->kb_totalfree > kbp->kb_highwat) | |
214 | kbp->kb_couldfree++; | |
215 | kbp->kb_totalfree++; | |
216 | ksp->ks_memuse -= size; | |
217 | if (ksp->ks_memuse + size >= ksp->ks_limit && | |
218 | ksp->ks_memuse < ksp->ks_limit) | |
219 | wakeup((caddr_t)ksp); | |
220 | ksp->ks_inuse--; | |
221 | #endif | |
222 | *(caddr_t *)addr = kbp->kb_next; | |
223 | kbp->kb_next = addr; | |
224 | splx(s); | |
225 | } | |
226 | ||
227 | /* | |
228 | * Initialize the kernel memory allocator | |
229 | */ | |
4c45483e | 230 | void |
15637ed4 RG |
231 | kmeminit() |
232 | { | |
233 | register long indx; | |
234 | int npg; | |
235 | ||
15637ed4 | 236 | #if (MAXALLOCSAVE > MINALLOCSIZE * 32768) |
4c45483e | 237 | # error "kmeminit: MAXALLOCSAVE too big" |
15637ed4 | 238 | #endif |
d8bca54c | 239 | #if (MAXALLOCSAVE < CLBYTES-1) |
4c45483e | 240 | # error "kmeminit: MAXALLOCSAVE too small" |
15637ed4 | 241 | #endif |
1d7f5b47 | 242 | npg = (VM_KMEM_SIZE + VM_MBUF_SIZE) / NBPG; |
15637ed4 RG |
243 | kmemusage = (struct kmemusage *) kmem_alloc(kernel_map, |
244 | (vm_size_t)(npg * sizeof(struct kmemusage))); | |
245 | kmem_map = kmem_suballoc(kernel_map, (vm_offset_t *)&kmembase, | |
246 | (vm_offset_t *)&kmemlimit, (vm_size_t)(npg * NBPG), FALSE); | |
247 | #ifdef KMEMSTATS | |
248 | for (indx = 0; indx < MINBUCKET + 16; indx++) { | |
249 | if (1 << indx >= CLBYTES) | |
250 | bucket[indx].kb_elmpercl = 1; | |
251 | else | |
252 | bucket[indx].kb_elmpercl = CLBYTES / (1 << indx); | |
253 | bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl; | |
254 | } | |
255 | for (indx = 0; indx < M_LAST; indx++) | |
256 | kmemstats[indx].ks_limit = npg * NBPG * 6 / 10; | |
b6e80617 DG |
257 | |
258 | /* limit the amount of mbuf space to 1/16 of system memory */ | |
259 | kmemstats[M_MBUF].ks_limit = (vm_page_count * NBPG) / 16; | |
15637ed4 RG |
260 | #endif |
261 | } | |
e8bbcc37 AC |
262 | |
263 | void * | |
264 | contigmalloc(size, type, flags, maxpa, alignmask, boundarymask) | |
265 | unsigned long size; | |
266 | int type; | |
267 | int flags; | |
268 | unsigned long maxpa; /* e.g. 16M - 1 for isa dma */ | |
269 | unsigned long alignmask; /* e.g. 1M - 1 for M boundary */ | |
270 | unsigned long boundarymask; /* e.g. 64K - 1 for 8-bit isa dma */ | |
271 | { | |
272 | unsigned long skipsize; | |
273 | void *skipva; | |
274 | ||
275 | size = round_page(size); | |
276 | if (size == 0 || size > boundarymask + 1) | |
277 | return (NULL); | |
278 | ||
279 | /* | |
280 | * Attempt to push the physical address to a suitable boundary by | |
281 | * skipping some memory. We could be cleverer here. E.g., mallocate | |
282 | * lots of single pages and then free the ones that we hope to use. | |
283 | * flags == M_WAIT is likely to hang the system. | |
284 | */ | |
285 | for (skipsize = 0, skipva = NULL; ; skipsize += NBPG) { | |
286 | unsigned long off; | |
287 | unsigned long pa; | |
288 | unsigned long prevpa; | |
289 | void *va; | |
290 | ||
291 | if (skipsize != 0) { | |
292 | skipva = malloc(skipsize, type, flags); | |
293 | if (skipva == NULL) { | |
294 | #ifdef DEBUG | |
295 | printf("contigmalloc: skipva NULL on try %d\n", | |
296 | 1 + skipsize / NBPG); | |
297 | #endif | |
298 | return (NULL); | |
299 | } | |
300 | } | |
301 | va = malloc(size, type, flags); | |
302 | if (skipsize != 0) | |
303 | free(skipva, type); | |
304 | if (va == NULL) { | |
305 | #ifdef DEBUG | |
306 | printf("contigmalloc: va NULL on try %d\n", | |
307 | 1 + skipsize / NBPG); | |
308 | #endif | |
309 | return (NULL); | |
310 | } | |
311 | for (off = 0, prevpa = 0; off < size; off += NBPG, prevpa = pa) | |
312 | { | |
313 | pa = pmap_extract(pmap_kernel(), (vm_offset_t)va + off); | |
314 | if (pa + NBPG - 1 > maxpa | |
315 | || off == 0 && pa & alignmask | |
316 | || off != 0 | |
317 | && (pa != prevpa + NBPG | |
318 | || (pa & boundarymask) == 0)) | |
319 | goto fail; | |
320 | } | |
321 | #ifdef DEBUG | |
322 | printf("contigmalloc: success at va %lx pa %lx on try %d\n", | |
323 | (unsigned long)va, | |
324 | pmap_extract(pmap_kernel(), (unsigned long)va), | |
325 | 1 + skipsize / NBPG); | |
326 | #endif | |
327 | return (va); | |
328 | fail: | |
329 | free(va, type); | |
330 | } | |
331 | } | |
332 | ||
333 |