Initial commit of OpenSPARC T2 architecture model.
[OpenSPARC-T2-SAM] / hypervisor / src / sample / lib / malloc.c
/*
 * ========== Copyright Header Begin ==========================================
 *
 * Hypervisor Software File: malloc.c
 *
 * Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved.
 *
 * - Do not alter or remove copyright notices
 *
 * - Redistribution and use of this software in source and binary forms, with
 *   or without modification, are permitted provided that the following
 *   conditions are met:
 *
 * - Redistribution of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 *
 * - Redistribution in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 *
 * Neither the name of Sun Microsystems, Inc. or the names of contributors
 * may be used to endorse or promote products derived from this software
 * without specific prior written permission.
 *
 * This software is provided "AS IS," without a warranty of any kind.
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
 * PARTICULAR PURPOSE OR NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN
 * MICROSYSTEMS, INC. ("SUN") AND ITS LICENSORS SHALL NOT BE LIABLE FOR
 * ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR
 * DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES. IN NO EVENT WILL SUN
 * OR ITS LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT OR DATA, OR
 * FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR PUNITIVE
 * DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY,
 * ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE, EVEN IF
 * SUN HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
 *
 * You acknowledge that this software is not designed, licensed or
 * intended for use in the design, construction, operation or maintenance of
 * any nuclear facility.
 *
 * ========== Copyright Header End ============================================
 */
/* Copyright (c) 1988 AT&T */
/* All Rights Reserved */

/* THIS IS UNPUBLISHED PROPRIETARY SOURCE CODE OF AT&T */
/* The copyright notice above does not evidence any */
/* actual or intended publication of such source code. */

#pragma ident	"@(#)malloc.c	1.17	98/07/23 SMI"	/* SVr4.0 1.12 */

/*LINTLIBRARY*/
#include <sys/types.h>

#ifndef debug
#define	NDEBUG
#endif

#include <stdlib.h>
#include <string.h>
#include <thread.h>
#include <synch.h>
#include <assert.h>
#include <malloc.h>
#include "mallint.h"
#include <unistd.h>
#include <limits.h>

static mutex_t mlock = DEFAULTMUTEX;
static ssize_t freespace(struct holdblk *);
static void *malloc_unlocked(size_t);
static void *realloc_unlocked(void *, size_t);
static void free_unlocked(void *);
static void *morecore(size_t);

/*
 * user-level memory allocator (malloc, free, realloc)
 *
 * -malloc, free, realloc and mallopt form a memory allocator
 *  similar to malloc, free, and realloc. The routines
 *  here are much faster than the original, with slightly worse
 *  space usage (a few percent difference on most input). They
 *  do not have the property that data in freed blocks is left
 *  untouched until the space is reallocated.
 *
 * -Memory is kept in the "arena", a singly linked list of blocks.
 *  These blocks are of 3 types.
 *	1. A free block. This is a block not in use by the
 *	   user. It has a 3 word header. (See description
 *	   of the free queue.)
 *	2. An allocated block. This is a block the user has
 *	   requested. It has only a 1 word header, pointing
 *	   to the next block of any sort.
 *	3. A permanently allocated block. This covers space
 *	   acquired by the user directly through sbrk(). It
 *	   has a 1 word header, as does 2.
 *  Blocks of type 1 have the lower bit of the pointer to the
 *  nextblock = 0. Blocks of type 2 and 3 have that bit set,
 *  to mark them busy.
 *
 * -Unallocated blocks are kept on an unsorted doubly linked
 *  free list.
 *
 * -Memory is allocated in blocks, with sizes specified by the
 *  user. A circular first-fit strategy is used, with a roving
 *  head of the free queue, which prevents bunching of small
 *  blocks at the head of the queue.
 *
 * -Compaction is performed at free time of any blocks immediately
 *  following the freed block. The freed block will be combined
 *  with a preceding block during the search phase of malloc.
 *  Since a freed block is added at the front of the free queue,
 *  which is moved to the end of the queue if considered and
 *  rejected during the search, fragmentation only occurs if
 *  a block with a contiguous preceding block that is free is
 *  freed and reallocated on the next call to malloc. The
 *  time savings of this strategy is judged to be worth the
 *  occasional waste of memory.
 *
 * -Small blocks (of size < MAXSIZE) are not allocated directly.
 *  A large "holding" block is allocated via a recursive call to
 *  malloc. This block contains a header and ?????? small blocks.
 *  Holding blocks for a given size of small block (rounded to the
 *  nearest ALIGNSZ bytes) are kept on a queue with the property that any
 *  holding block with an unused small block is in front of any without.
 *  A list of free blocks is kept within the holding block.
 */
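
/*
 * Illustrative sketch of the busy-bit tagging described above. These
 * definitions are hypothetical, for illustration only; the real macros
 * (TESTBUSY, SETBUSY, CLRBUSY, CLRALL, etc.) live in mallint.h:
 *
 *	#define	BUSY	1UL			(low bit: block is busy)
 *	#define	SMAL	2UL			(next bit: small block)
 *	#define	TESTBUSY(p)	((uintptr_t)(p) & BUSY)
 *	#define	SETBUSY(p)	((struct header *)((uintptr_t)(p) | BUSY))
 *	#define	CLRBUSY(p)	((struct header *)((uintptr_t)(p) & ~BUSY))
 *	#define	CLRALL(p)	((struct header *)((uintptr_t)(p) & ~(BUSY | SMAL)))
 *
 * Tagging works because block addresses are ALIGNSZ-aligned, so the low
 * bits of a real pointer are always zero.
 */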

/*
 * description of arena, free queue, holding blocks etc.
 *
 * New compiler and linker does not guarantee order of initialized data.
 * Define freeptr as arena[2-3] to guarantee it follows arena in memory.
 * Later code depends on this order.
 */

static struct header arena[4] = {
	{0, 0, 0},
	{0, 0, 0},
	{0, 0, 0},
	{0, 0, 0}
};
	/*
	 * the second word is a minimal block to
	 * start the arena. The first is a busy
	 * block to be pointed to by the last block.
	 */

#define	freeptr	(arena + 2)
	/* first and last entry in free list */
static struct header *arenaend;	/* ptr to block marking high end of arena */
static struct header *lastblk;	/* the highest block in the arena */
static struct holdblk **holdhead;	/* pointer to array of head pointers */
					/* to holding block chains */
/*
 * In order to save time calculating indices, the array is 1 too
 * large, and the first element is unused
 *
 * Variables controlling algorithm, esp. how holding blocks are used
 */
static int numlblks = NUMLBLKS;
static int minhead = MINHEAD;
static int change = 0;	/* != 0, once param changes are no longer allowed */
static int fastct = FASTCT;
static unsigned int maxfast = MAXFAST;
/* number of small block sizes to map to one size */

static int grain = ALIGNSZ;

#ifdef debug
static int case1count = 0;

static void
checkq(void)
{
	register struct header *p;

	p = &freeptr[0];

	/* check forward */
	/*CSTYLED*/
	while (p != &freeptr[1]) {
		p = p->nextfree;
		assert(p->prevfree->nextfree == p);
	}

	/* check backward */
	/*CSTYLED*/
	while (p != &freeptr[0]) {
		p = p->prevfree;
		assert(p->nextfree->prevfree == p);
	}
}
#endif


/*
 * malloc(nbytes) - give a user nbytes to use
 */

void *
malloc(size_t nbytes)
{
	void *ret;

	mutex_lock(&mlock);
	ret = malloc_unlocked(nbytes);
	mutex_unlock(&mlock);
	return (ret);
}


/*
 * malloc_unlocked(nbytes) - Do the real work for malloc
 */

static void *
malloc_unlocked(size_t nbytes)
{
	struct header *blk;
	size_t nb;	/* size of entire block we need */

	/* on first call, initialize */
	if (freeptr[0].nextfree == GROUND) {
		/* initialize arena */
		arena[1].nextblk = (struct header *)BUSY;
		arena[0].nextblk = (struct header *)BUSY;
		lastblk = arenaend = &(arena[1]);
		/* initialize free queue */
		freeptr[0].nextfree = &(freeptr[1]);
		freeptr[1].nextblk = &(arena[0]);
		freeptr[1].prevfree = &(freeptr[0]);
		/* mark that small blocks not init yet */
	}
	if (nbytes == 0)
		return (NULL);

	if (nbytes <= maxfast) {
		/*
		 * We can allocate out of a holding block
		 */
		struct holdblk *holdblk;	/* head of right sized queue */
		struct lblk *lblk;		/* pointer to a little block */
		struct holdblk *newhold;

		if (!change) {
			int i;
			/*
			 * This allocates space for hold block
			 * pointers by calling malloc recursively.
			 * Maxfast is temporarily set to 0, to
			 * avoid infinite recursion. allocate
			 * space for an extra ptr so that an index
			 * is just ->blksz/grain, with the first
			 * ptr unused.
			 */
			change = 1;	/* change to algorithm params */
					/* no longer allowed */
			/*
			 * temporarily alter maxfast, to avoid
			 * infinite recursion
			 */
			maxfast = 0;
			holdhead = (struct holdblk **)
			    malloc_unlocked(sizeof (struct holdblk *) *
			    (fastct + 1));
			if (holdhead == NULL)
				return (malloc_unlocked(nbytes));
			for (i = 1; i <= fastct; i++) {
				holdhead[i] = HGROUND;
			}
			maxfast = fastct * grain;
		}
		/*
		 * Note that this uses the absolute min header size (MINHEAD)
		 * unlike the large block case which uses minhead
		 *
		 * round up to nearest multiple of grain
		 * code assumes grain is a multiple of MINHEAD
		 */
		/* round up to grain */
		nb = (nbytes + grain - 1) / grain * grain;
		holdblk = holdhead[nb / grain];
		nb = nb + MINHEAD;
		/*
		 * look for space in the holding block. Blocks with
		 * space will be in front of those without
		 */
		if ((holdblk != HGROUND) && (holdblk->lfreeq != LGROUND)) {
			/* there is space */
			lblk = holdblk->lfreeq;

			/*
			 * Now make lfreeq point to a free block.
			 * If lblk has been previously allocated and
			 * freed, it has a valid pointer to use.
			 * Otherwise, lblk is at the beginning of
			 * the unallocated blocks at the end of
			 * the holding block, so, if there is room, take
			 * the next space. If not, mark holdblk full,
			 * and move holdblk to the end of the queue
			 */
			if (lblk < holdblk->unused) {
				/* move to next holdblk, if this one full */
				if ((holdblk->lfreeq =
				    CLRSMAL(lblk->header.nextfree)) ==
				    LGROUND) {
					holdhead[(nb-MINHEAD) / grain] =
					    holdblk->nexthblk;
				}
			} else if (((char *)holdblk->unused + nb) <
			    ((char *)holdblk + HOLDSZ(nb))) {
				holdblk->unused = (struct lblk *)
				    ((char *)holdblk->unused+nb);
				holdblk->lfreeq = holdblk->unused;
			} else {
				holdblk->unused = (struct lblk *)
				    ((char *)holdblk->unused+nb);
				holdblk->lfreeq = LGROUND;
				holdhead[(nb-MINHEAD)/grain] =
				    holdblk->nexthblk;
			}
			/* mark as busy and small */
			lblk->header.holder = (struct holdblk *)SETALL(holdblk);
		} else {
			/* we need a new holding block */
			newhold = (struct holdblk *)
			    malloc_unlocked(HOLDSZ(nb));
			if ((char *)newhold == NULL) {
				return (NULL);
			}
			/* add to head of free queue */
			if (holdblk != HGROUND) {
				newhold->nexthblk = holdblk;
				newhold->prevhblk = holdblk->prevhblk;
				holdblk->prevhblk = newhold;
				newhold->prevhblk->nexthblk = newhold;
			} else {
				newhold->nexthblk = newhold->prevhblk = newhold;
			}
			holdhead[(nb-MINHEAD)/grain] = newhold;
			/* set up newhold */
			lblk = (struct lblk *)(newhold->space);
			newhold->lfreeq = newhold->unused =
			    (struct lblk *)((char *)newhold->space+nb);
			lblk->header.holder = (struct holdblk *)SETALL(newhold);
			newhold->blksz = nb-MINHEAD;
		}
#ifdef debug
		assert(((struct holdblk *)CLRALL(lblk->header.holder))->blksz >=
		    nbytes);
#endif /* debug */
		return ((char *)lblk + MINHEAD);
	} else {
		/*
		 * We need an ordinary block
		 */
		struct header *newblk;	/* used for creating a block */

		/* get number of bytes we need */
		nb = nbytes + minhead;
		nb = (nb + ALIGNSZ - 1) / ALIGNSZ * ALIGNSZ;	/* align */
		nb = (nb > MINBLKSZ) ? nb : MINBLKSZ;
		/*
		 * see if there is a big enough block
		 * If none exists, you will get to freeptr[1].
		 * freeptr[1].next = &arena[0], so when you do the test,
		 * the result is a large positive number, since arena[0]
		 * comes before all blocks. Arena[0] is marked busy so
		 * that it will not be compacted. This kludge is for the
		 * sake of the almighty efficiency.
		 */
		/* check that a very large request won't cause an inf. loop */

		if ((freeptr[1].nextblk - &(freeptr[1])) < nb) {
			return (NULL);
		} else {
			struct header *next;		/* following block */
			struct header *nextnext;	/* block after next */

			blk = freeptr;
			do {
				blk = blk->nextfree;
				/* see if we can compact */
				next = blk->nextblk;
				if (!TESTBUSY(nextnext = next->nextblk)) {
					do {
						DELFREEQ(next);
						next = nextnext;
						nextnext = next->nextblk;
					} while (!TESTBUSY(nextnext));
					/*
					 * next will be at most == to lastblk,
					 * but I think the >= test is faster
					 */
					if (next >= arenaend)
						lastblk = blk;
					blk->nextblk = next;
				}
			} while (((char *)(next) - (char *)blk) < nb);
		}
		/*
		 * if we didn't find a block, get more memory
		 */
		if (blk == &(freeptr[1])) {
			/*
			 * careful coding could likely replace
			 * newend with arenaend
			 */
			struct header *newend;	/* new end of arena */
			ssize_t nget;	/* number of words to get */

			/*
			 * Three cases - 1. There is space between arenaend
			 *		    and the break value that will become
			 *		    a permanently allocated block.
			 *		 2. Case 1 is not true, and the last
			 *		    block is allocated.
			 *		 3. Case 1 is not true, and the last
			 *		    block is free
			 */
			if ((newblk = (struct header *)sbrk(0)) !=
			    (struct header *)((char *)arenaend + HEADSZ)) {
				/* case 1 */
#ifdef debug
				if (case1count++ > 0)
					write(2, "Case 1 hit more than once. "
					    "brk or sbrk?\n", 41);
#endif
				/* get size to fetch */
				nget = nb + HEADSZ;
				/* round up to a block */
				nget = (nget + BLOCKSZ - 1)/BLOCKSZ * BLOCKSZ;
				assert((int)newblk % ALIGNSZ == 0);
				/* get memory */
				if (morecore(nget) == (void *)-1)
					return (NULL);
				/* add to arena */
				newend = (struct header *)((char *)newblk + nget
				    - HEADSZ);
				assert((int)newblk % ALIGNSZ == 0);
				newend->nextblk = SETBUSY(&(arena[1]));
/* ??? newblk ?? */
				newblk->nextblk = newend;

				/*
				 * space becomes a permanently allocated block.
				 * This is likely not mt-safe as lock is not
				 * shared with brk or sbrk
				 */
				arenaend->nextblk = SETBUSY(newblk);
				/* adjust other pointers */
				arenaend = newend;
				lastblk = newblk;
				blk = newblk;
			} else if (TESTBUSY(lastblk->nextblk)) {
				/* case 2 */
				nget = (nb + BLOCKSZ - 1) / BLOCKSZ * BLOCKSZ;
				if (morecore(nget) == (void *)-1)
					return (NULL);
				/* block must be word aligned */
				assert(((int)newblk%ALIGNSZ) == 0);
				/*
				 * stub at old arenaend becomes first word
				 * in blk
				 */
/* ??? newblk = arenaend; */

				newend =
				    (struct header *)((char *)arenaend+nget);
				newend->nextblk = SETBUSY(&(arena[1]));
				arenaend->nextblk = newend;
				lastblk = blk = arenaend;
				arenaend = newend;
			} else {
				/* case 3 */
				/*
				 * last block in arena is at end of memory and
				 * is free
				 */
				/* 1.7 had this backward without cast */
				nget = nb -
				    ((char *)arenaend - (char *)lastblk);
				nget = (nget + (BLOCKSZ - 1)) /
				    BLOCKSZ * BLOCKSZ;
				assert(((int)newblk % ALIGNSZ) == 0);
				if (morecore(nget) == (void *)-1)
					return (NULL);
				/* combine with last block, put in arena */
				newend = (struct header *)
				    ((char *)arenaend + nget);
				arenaend = lastblk->nextblk = newend;
				newend->nextblk = SETBUSY(&(arena[1]));
				/* set which block to use */
				blk = lastblk;
				DELFREEQ(blk);
			}
		} else {
			struct header *nblk;	/* next block */

			/* take block found off free queue */
			DELFREEQ(blk);
			/*
			 * make head of free queue immediately follow blk,
			 * unless blk was at the end of the queue
			 */
			nblk = blk->nextfree;
			if (nblk != &(freeptr[1])) {
				MOVEHEAD(nblk);
			}
		}
		/* blk now points to an adequate block */
		if (((char *)blk->nextblk - (char *)blk) - nb >= MINBLKSZ) {
			/* carve out the right size block */
			/* newblk will be the remainder */
			newblk = (struct header *)((char *)blk + nb);
			newblk->nextblk = blk->nextblk;
			/* mark the block busy */
			blk->nextblk = SETBUSY(newblk);
			ADDFREEQ(newblk);
			/* if blk was lastblk, make newblk lastblk */
			if (blk == lastblk)
				lastblk = newblk;
		} else {
			/* just mark the block busy */
			blk->nextblk = SETBUSY(blk->nextblk);
		}
	}
	CHECKQ;
	assert((char *)CLRALL(blk->nextblk) -
	    ((char *)blk + minhead) >= nbytes);
	assert((char *)CLRALL(blk->nextblk) -
	    ((char *)blk + minhead) < nbytes + MINBLKSZ);
	return ((char *)blk + minhead);
}
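
/*
 * Worked example of the small-block path above, assuming the defaults
 * grain == ALIGNSZ == 8 and MINHEAD == 8 (the real constants come from
 * mallint.h):
 *
 *	nbytes = 10
 *	nb = (10 + 8 - 1) / 8 * 8 = 16		(round up to grain)
 *	holdblk = holdhead[16 / 8]		(queue for 16-byte blocks)
 *	nb = 16 + MINHEAD = 24			(bytes per little block)
 *
 * so a 10-byte request is served from the holding-block chain for
 * 16-byte small blocks, and each little block occupies 24 bytes of its
 * holding block.
 */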

/*
 * free(ptr) - free block that user thinks starts at ptr
 *
 *	input - ptr-1 contains the block header.
 *		If the header points forward, we have a normal
 *			block pointing to the next block
 *		if the header points backward, we have a small
 *			block from a holding block.
 *		In both cases, the busy bit must be set
 */

void
free(void *ptr)
{
	mutex_lock(&mlock);
	free_unlocked(ptr);
	mutex_unlock(&mlock);
}

/*
 * free_unlocked(ptr) - Do the real work for free()
 */

void
free_unlocked(void *ptr)
{
	struct holdblk *holdblk;	/* block holding blk */
	struct holdblk *oldhead;	/* former head of the hold block */
					/* queue containing blk's holder */

	if (ptr == NULL)
		return;
	if (TESTSMAL(((struct header *)((char *)ptr - MINHEAD))->nextblk)) {
		struct lblk *lblk;	/* pointer to freed block */
		ssize_t offset;		/* choice of header lists */

		lblk = (struct lblk *)CLRBUSY((char *)ptr - MINHEAD);
		assert((struct header *)lblk < arenaend);
		assert((struct header *)lblk > arena);
		/* allow twits (e.g. awk) to free a block twice */
		holdblk = lblk->header.holder;
		if (!TESTBUSY(holdblk))
			return;
		holdblk = (struct holdblk *)CLRALL(holdblk);
		/* put lblk on its hold block's free list */
		lblk->header.nextfree = SETSMAL(holdblk->lfreeq);
		holdblk->lfreeq = lblk;
		/* move holdblk to head of queue, if it's not already there */
		offset = holdblk->blksz / grain;
		oldhead = holdhead[offset];
		if (oldhead != holdblk) {
			/* first take out of current spot */
			holdhead[offset] = holdblk;
			holdblk->nexthblk->prevhblk = holdblk->prevhblk;
			holdblk->prevhblk->nexthblk = holdblk->nexthblk;
			/* now add at front */
			holdblk->nexthblk = oldhead;
			holdblk->prevhblk = oldhead->prevhblk;
			oldhead->prevhblk = holdblk;
			holdblk->prevhblk->nexthblk = holdblk;
		}
	} else {
		struct header *blk;	/* real start of block */
		struct header *next;	/* next = blk->nextblk */
		struct header *nextnext;	/* block after next */

		blk = (struct header *)((char *)ptr - minhead);
		next = blk->nextblk;
		/* take care of twits (e.g. awk) who return blocks twice */
		if (!TESTBUSY(next))
			return;
		blk->nextblk = next = CLRBUSY(next);
		ADDFREEQ(blk);
		/* see if we can compact */
		if (!TESTBUSY(nextnext = next->nextblk)) {
			do {
				DELFREEQ(next);
				next = nextnext;
			} while (!TESTBUSY(nextnext = next->nextblk));
			if (next == arenaend)
				lastblk = blk;
			blk->nextblk = next;
		}
	}
	CHECKQ
}


/*
 * realloc(ptr, size) - give the user a block of size "size", with
 *			the contents pointed to by ptr. Free ptr.
 */

void *
realloc(void *ptr, size_t size)
{
	void *retval;

	mutex_lock(&mlock);
	retval = realloc_unlocked(ptr, size);
	mutex_unlock(&mlock);
	return (retval);
}


/*
 * realloc_unlocked(ptr, size) - Do the real work for realloc()
 */

static void *
realloc_unlocked(void *ptr, size_t size)
{
	struct header *blk;	/* block ptr is contained in */
	size_t trusize;		/* block size as allocator sees it */
	char *newptr;		/* pointer to user's new block */
	size_t cpysize;		/* amount to copy */
	struct header *next;	/* block after blk */

	if (ptr == NULL)
		return (malloc_unlocked(size));

	if (size == 0) {
		free_unlocked(ptr);
		return (NULL);
	}

	if (TESTSMAL(((struct lblk *)((char *)ptr - MINHEAD))->
	    header.holder)) {
		/*
		 * we have a special small block which can't be expanded
		 *
		 * This makes the assumption that even if the user is
		 * reallocating a free block, malloc doesn't alter the contents
		 * of small blocks
		 */
		newptr = malloc_unlocked(size);
		if (newptr == NULL)
			return (NULL);
		/* this isn't to save time--it's to protect the twits */
		if ((char *)ptr != newptr) {
			struct lblk *lblk;
			lblk = (struct lblk *)((char *)ptr - MINHEAD);
			cpysize = ((struct holdblk *)
			    CLRALL(lblk->header.holder))->blksz;
			cpysize = (size > cpysize) ? cpysize : size;
			(void) memcpy(newptr, ptr, cpysize);
			free_unlocked(ptr);
		}
	} else {
		blk = (struct header *)((char *)ptr - minhead);
		next = blk->nextblk;
		/*
		 * deal with twits who reallocate free blocks
		 *
		 * if they haven't reset minhead via mallopt, that's
		 * their problem
		 */
		if (!TESTBUSY(next)) {
			DELFREEQ(blk);
			blk->nextblk = SETBUSY(next);
		}
		next = CLRBUSY(next);
		/* make blk as big as possible */
		if (!TESTBUSY(next->nextblk)) {
			do {
				DELFREEQ(next);
				next = next->nextblk;
			} while (!TESTBUSY(next->nextblk));
			blk->nextblk = SETBUSY(next);
			if (next >= arenaend)
				lastblk = blk;
		}
		/* get size we really need */
		trusize = size+minhead;
		trusize = (trusize + ALIGNSZ - 1)/ALIGNSZ*ALIGNSZ;
		trusize = (trusize >= MINBLKSZ) ? trusize : MINBLKSZ;
		/* see if we have enough */
		/* this isn't really the copy size, but I need a register */
		cpysize = (char *)next - (char *)blk;
		if (cpysize >= trusize) {
			/* carve out the size we need */
			struct header *newblk;	/* remainder */

			if (cpysize - trusize >= MINBLKSZ) {
				/*
				 * carve out the right size block
				 * newblk will be the remainder
				 */
				newblk = (struct header *)((char *)blk +
				    trusize);
				newblk->nextblk = next;
				blk->nextblk = SETBUSY(newblk);
				/* at this point, next is invalid */
				ADDFREEQ(newblk);
				/* if blk was lastblk, make newblk lastblk */
				if (blk == lastblk)
					lastblk = newblk;
			}
			newptr = ptr;
		} else {
			/* bite the bullet, and call malloc */
			cpysize = (size > cpysize) ? cpysize : size;
			newptr = malloc_unlocked(size);
			if (newptr == NULL)
				return (NULL);
			(void) memcpy(newptr, ptr, cpysize);
			free_unlocked(ptr);
		}
	}
	return (newptr);
}


/* LINTLIBRARY */
/*
 * calloc - allocate and clear memory block
 */

void *
calloc(size_t num, size_t size)
{
	char *mp;

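	/*
	 * note: following the historical SVr4 interface, the product
	 * num * size is not checked for overflow; if it wraps, the
	 * allocation is smaller than the caller intended
	 */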
	num *= size;
	mp = malloc(num);
	if (mp == NULL)
		return (NULL);
	(void) memset(mp, 0, num);
	return (mp);
}


/*
 * Mallopt - set options for allocation
 *
 *	Mallopt provides for control over the allocation algorithm.
 *	The cmds available are:
 *
 *	M_MXFAST Set maxfast to value. Maxfast is the size of the
 *		largest small, quickly allocated block. Maxfast
 *		may be set to 0 to disable fast allocation entirely.
 *
 *	M_NLBLKS Set numlblks to value. Numlblks is the number of
 *		small blocks per holding block. Value must be
 *		greater than 1.
 *
 *	M_GRAIN Set grain to value. The sizes of all blocks
 *		smaller than maxfast are considered to be rounded
 *		up to the nearest multiple of grain. The default
 *		value of grain is the smallest number of bytes
 *		which will allow alignment of any data type. Grain
 *		will be rounded up to a multiple of its default,
 *		and maxfast will be rounded up to a multiple of
 *		grain. Value must be greater than 0.
 *
 *	M_KEEP Retain data in freed block until the next malloc,
 *		realloc, or calloc. Value is ignored.
 *		This option is provided only for compatibility with
 *		the old version of malloc, and is not recommended.
 *
 *	returns - 0, upon successful completion
 *		  1, if malloc has previously been called or
 *		     if value or cmd have illegal values
 */

int
_mallopt(int cmd, int value)
{
	/* disallow changes once a small block is allocated */
	mutex_lock(&mlock);
	if (change) {
		mutex_unlock(&mlock);
		return (1);
	}
	switch (cmd) {
	case M_MXFAST:
		if (value < 0) {
			mutex_unlock(&mlock);
			return (1);
		}
		fastct = (value + grain - 1) / grain;
		maxfast = grain*fastct;
		break;
	case M_NLBLKS:
		if (value <= 1) {
			mutex_unlock(&mlock);
			return (1);
		}
		numlblks = value;
		break;
	case M_GRAIN:
		if (value <= 0) {
			mutex_unlock(&mlock);
			return (1);
		}

		/* round grain up to a multiple of ALIGNSZ */
		grain = (value + ALIGNSZ - 1)/ALIGNSZ*ALIGNSZ;

		/* reduce fastct appropriately */
		fastct = (maxfast + grain - 1) / grain;
		maxfast = grain * fastct;
		break;
	case M_KEEP:
		if (change && holdhead != NULL) {
			mutex_unlock(&mlock);
			return (1);
		}
		minhead = HEADSZ;
		break;
	default:
		mutex_unlock(&mlock);
		return (1);
	}
	mutex_unlock(&mlock);
	return (0);
}
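
/*
 * Example use (hypothetical caller; assumes malloc.h maps the public
 * mallopt() entry point to _mallopt()). Tuning must happen before the
 * first small block is allocated, or the call fails:
 *
 *	if (mallopt(M_MXFAST, 64) != 0)		(fast path up to 64 bytes)
 *		... allocator already in use, tuning rejected ...
 *	(void) mallopt(M_NLBLKS, 100);		(100 small blocks per holder)
 */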

/*
 * mallinfo - provide information about space usage
 *
 *	output - a structure containing a description
 *		 of space usage, defined in malloc.h
 */

struct mallinfo
_mallinfo(void)
{
	struct header *blk, *next;	/* ptr to ordinary blocks */
	struct holdblk *hblk;		/* ptr to holding blocks */
	struct mallinfo inf;		/* return value */
	int i;				/* the ubiquitous counter */
	ssize_t size;			/* size of a block */
	ssize_t fsp;			/* free space in 1 hold block */

	mutex_lock(&mlock);
	(void) memset(&inf, 0, sizeof (struct mallinfo));
	if (freeptr[0].nextfree == GROUND) {
		mutex_unlock(&mlock);
		return (inf);
	}
	blk = CLRBUSY(arena[1].nextblk);
	/* return total space used */
	inf.arena = (char *)arenaend - (char *)blk;

	/*
	 * loop through arena, counting # of blocks, and
	 * space used by blocks
	 */
	next = CLRBUSY(blk->nextblk);
	while (next != &(arena[1])) {
		inf.ordblks++;
		size = (char *)next - (char *)blk;
		if (TESTBUSY(blk->nextblk)) {
			inf.uordblks += size;
			inf.keepcost += HEADSZ-MINHEAD;
		} else {
			inf.fordblks += size;
		}
		blk = next;
		next = CLRBUSY(blk->nextblk);
	}

	/*
	 * if any holding blocks have been allocated
	 * then examine space in holding blks
	 */
	if (change && holdhead != NULL) {
		for (i = fastct; i > 0; i--) {	/* loop thru ea. chain */
			hblk = holdhead[i];
			/* do only if chain not empty */
			if (hblk != HGROUND) {
				size = hblk->blksz +
				    sizeof (struct lblk) - sizeof (int);
				do {	/* loop thru 1 hold blk chain */
					inf.hblks++;
					fsp = freespace(hblk);
					inf.fsmblks += fsp;
					inf.usmblks += numlblks*size - fsp;
					inf.smblks += numlblks;
					hblk = hblk->nexthblk;
				} while (hblk != holdhead[i]);
			}
		}
	}
	inf.hblkhd = (inf.smblks / numlblks) * sizeof (struct holdblk);
	/* holding blocks were counted in ordblks, so subtract off */
	inf.ordblks -= inf.hblks;
	inf.uordblks -= inf.hblkhd + inf.usmblks + inf.fsmblks;
	inf.keepcost -= inf.hblks*(HEADSZ - MINHEAD);
	mutex_unlock(&mlock);
	return (inf);
}
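
/*
 * Example use (hypothetical caller; assumes malloc.h maps the public
 * mallinfo() entry point to _mallinfo()):
 *
 *	struct mallinfo mi = mallinfo();
 *
 * mi.arena is the total arena size; mi.uordblks and mi.fordblks count
 * bytes in busy and free ordinary blocks; mi.usmblks and mi.fsmblks
 * count bytes in busy and free small blocks.
 */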


/*
 * freespace - calc. how much space is used in the free
 *	small blocks in a given holding block
 *
 *	input - holdblk = given holding block
 *
 *	returns space used in free small blocks of holdblk
 */

static ssize_t
freespace(struct holdblk *holdblk)
{
	struct lblk *lblk;
	ssize_t space = 0;
	ssize_t size;
	struct lblk *unused;

	lblk = CLRSMAL(holdblk->lfreeq);
	size = holdblk->blksz + sizeof (struct lblk) - sizeof (int);
	unused = CLRSMAL(holdblk->unused);
	/* follow free chain */
	while ((lblk != LGROUND) && (lblk != unused)) {
		space += size;
		lblk = CLRSMAL(lblk->header.nextfree);
	}
	space += ((char *)holdblk + HOLDSZ(size)) - (char *)unused;
	return (space);
}

static void *
morecore(size_t bytes)
{
	void *ret;

	if (bytes > LONG_MAX) {
		intptr_t wad;
		/*
		 * The request size is too big. We need to do this in
		 * chunks. Sbrk only takes an int for an arg.
		 */
		if (bytes == ULONG_MAX)
			return ((void *)-1);

		ret = sbrk(0);
		wad = LONG_MAX;
		while (wad > 0) {
			if (sbrk(wad) == (void *)-1) {
				if (ret != sbrk(0))
					(void) sbrk(-LONG_MAX);
				return ((void *)-1);
			}
			bytes -= LONG_MAX;
			wad = bytes;
		}
	} else
		ret = sbrk(bytes);

	return (ret);
}
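
/*
 * Worked example of the chunked path above (hypothetical 32-bit values
 * with LONG_MAX == 0x7fffffff): a request for 0xc0000000 bytes becomes
 * sbrk(0x7fffffff) followed by sbrk(0x40000001); if the second call
 * fails, the break is moved back by one chunk and (void *)-1 returned.
 */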

#ifdef debug
int
check_arena(void)
{
	unsigned bsize;
	struct header *blk, *prev, *next;	/* ptr to ordinary blocks */

	(void) mutex_lock(&mlock);
	if (freeptr[0].nextfree == GROUND) {
		(void) mutex_unlock(&mlock);
		return (-1);
	}
	blk = arena + 1;

	/* loop through arena, checking */
	blk = (struct header *)CLRALL(blk->nextblk);
	next = (struct header *)CLRALL(blk->nextblk);
	while (next != arena + 1) {
		assert(blk >= arena + 1);
		assert(blk <= lastblk);
		assert(next >= blk + 1);
		assert(((unsigned)((struct header *)blk->nextblk) &
		    (4 | SMAL)) == 0);

		if (TESTBUSY(blk->nextblk) == 0) {
			assert(blk->nextfree >= freeptr);
			assert(blk->prevfree >= freeptr);
			assert(blk->nextfree <= lastblk);
			assert(blk->prevfree <= lastblk);
			assert(((unsigned)((struct header *)blk->nextfree) &
			    7) == 0);
			assert(((unsigned)((struct header *)blk->prevfree) &
			    7) == 0 || blk->prevfree == freeptr);
		}
		blk = next;
		next = CLRBUSY(blk->nextblk);
	}
	(void) mutex_unlock(&mlock);
	return (0);
}

#define	RSTALLOC	1
#endif

#ifdef RSTALLOC
/*
 * rstalloc - reset alloc routines
 *
 *	description - return allocated memory and reset
 *		allocation pointers.
 *
 *	Warning - This is for debugging purposes only.
 *		It will return all memory allocated after
 *		the first call to malloc, even if some
 *		of it was fetched by a user's sbrk().
 */

void
rstalloc(void)
{
	(void) mutex_lock(&mlock);
	minhead = MINHEAD;
	grain = ALIGNSZ;
	numlblks = NUMLBLKS;
	fastct = FASTCT;
	maxfast = MAXFAST;
	change = 0;
	if (freeptr[0].nextfree == GROUND) {
		(void) mutex_unlock(&mlock);
		return;
	}
	brk(CLRBUSY(arena[1].nextblk));
	freeptr[0].nextfree = GROUND;
#ifdef debug
	case1count = 0;
#endif
	(void) mutex_unlock(&mlock);
}
#endif	/* RSTALLOC */

/*
 * cfree is an undocumented, obsolete function
 */

/* ARGSUSED */
void
_cfree(char *p, unsigned num, unsigned size)
{
	free(p);
}