/*
 * Copyright (c) 1987 Regents of the University of California.
 *
 * Redistribution and use in source and binary forms are permitted
 * provided that this notice is preserved and that due credit is given
 * to the University of California at Berkeley. The name of the University
 * may not be used to endorse or promote products derived from this
 * software without specific prior written permission. This software
 * is provided ``as is'' without express or implied warranty.
 *
 *	@(#)kern_malloc.c	7.8 (Berkeley) %G%
 */
#include "../machine/pte.h"
/*
 * Allocator globals.
 *
 * bucket:    free-list heads, one per power-of-two size class
 *            (MINBUCKET + 16 classes in all).
 * kmemstats: per-type usage statistics, indexed by M_* type
 *            (M_LAST entries).
 * kmemusage: per-page usage records for the kmem arena
 *            (pointer only; presumably allocated/sized elsewhere --
 *            not visible in this fragment).
 */
struct kmembuckets bucket[MINBUCKET + 16];
struct kmemstats kmemstats[M_LAST];
struct kmemusage *kmemusage;
/*
 * Allocate a block of memory.
 *
 * NOTE(review): this text is a garbled extraction of the original
 * function.  Statements are split mid-expression across lines, and
 * several lines appear to have been dropped: the K&R parameter
 * declarations, the declarations of `va' and `cp', the computation of
 * the bucket index `indx', the assignment of `kup', and most closing
 * braces.  Only comments were added here; reconcile against the
 * original kern_malloc.c before compiling.
 */
malloc(size
, type
, flags
)
/* Free-list bucket for the requested size class. */
register struct kmembuckets
*kbp
;
/* Per-page usage record for the element being handed out.
 * NOTE(review): the assignment that sets this is missing from the
 * fragment -- TODO confirm against the original. */
register struct kmemusage
*kup
;
/* indx: bucket index; npg: clicks of memory needed; alloc: kmem map
 * offset; allocsize: request rounded up to a click multiple;
 * s: presumably a saved interrupt priority level -- no spl call is
 * visible in this fragment (TODO confirm). */
long indx
, npg
, alloc
, allocsize
, s
;
/* Per-type statistics record for `type'. */
register struct kmemstats
*ksp
= &kmemstats
[type
];
/* Block while this type is at or over its memory limit; the matching
 * wakeup on `ksp' is presumably done by free() but is not visible in
 * this fragment (TODO confirm). */
while (ksp
->ks_memuse
>= ksp
->ks_limit
) {
/* Saturate the blocked-on-limit counter at 65535.
 * NOTE(review): the increment itself appears to have been dropped;
 * as extracted, the sleep is conditional on the counter instead. */
if (ksp
->ks_limblocks
< 65535)
sleep((caddr_t
)ksp
, PSWP
+2);
/* Bucket free list empty: carve fresh pages for this size class. */
if (kbp
->kb_next
== NULL
) {
/* Round the request up to a whole number of clicks. */
allocsize
= roundup(size
, CLBYTES
);
npg
= clrnd(btoc(allocsize
));
/* M_NOWAIT caller and not enough free memory.
 * NOTE(review): the failure return that should follow is missing
 * from this fragment (TODO confirm). */
if ((flags
& M_NOWAIT
) && freemem
< npg
) {
/* Grab npg clicks of kernel map space (rmalloc offsets are
 * 1-based; see the CLSIZE adjustment below). */
alloc
= rmalloc(kmemmap
, npg
);
/* Map space exhausted: sleep until free() wakes &wantkmemmap;
 * the blocked-on-map counter saturates at 65535 (its increment is
 * not visible in this fragment). */
if (ksp
->ks_mapblocks
< 65535)
sleep((caddr_t
)&wantkmemmap
, PSWP
+2);
alloc
-= CLSIZE
; /* convert to base 0 */
/* Fill the new page-table range with real memory, charged to
 * proc[0] (the system). */
(void) vmemall(&kmempt
[alloc
], npg
, &proc
[0], CSYS
);
va
= (caddr_t
) kmemxtob(alloc
);
/* Validate the mapping for the new kernel virtual range. */
vmaccess(&kmempt
[alloc
], va
, npg
);
kbp
->kb_total
+= kbp
->kb_elmpercl
;
/* NOTE(review): as extracted, this makes any allocation larger than
 * MAXALLOCSAVE panic, which contradicts the surrounding logic that
 * just allocated the pages for it -- in the original an oversized
 * request is returned whole here and the panic belongs elsewhere.
 * Lines were almost certainly dropped; TODO confirm. */
if (allocsize
> MAXALLOCSAVE
) {
panic("malloc: allocation too large");
ksp
->ks_memuse
+= allocsize
;
/* Fresh page run: every element in it starts out free. */
kup
->ku_freecnt
= kbp
->kb_elmpercl
;
kbp
->kb_totalfree
+= kbp
->kb_elmpercl
;
/* Carve the new pages into allocsize-byte elements, threading the
 * free list from the highest-addressed element downward: each
 * element's first word points at the element below it. */
kbp
->kb_next
= va
+ (npg
* NBPG
) - allocsize
;
for (cp
= kbp
->kb_next
; cp
> va
; cp
-= allocsize
)
*(caddr_t
*)cp
= cp
- allocsize
;
/* Advance the free-list head past the element being handed out.
 * NOTE(review): reading through `va' rather than the old list head
 * suggests intervening lines were dropped -- TODO confirm. */
kbp
->kb_next
= *(caddr_t
*)va
;
/* Consistency checks on the usage record for the popped element. */
if (kup
->ku_indx
!= indx
)
panic("malloc: wrong bucket");
if (kup
->ku_freecnt
== 0)
panic("malloc: lost data");
/* Charge the type with one element of this power-of-two class. */
ksp
->ks_memuse
+= 1 << indx
;
/* Track the high-water mark of memory in use by this type. */
if (ksp
->ks_memuse
> ksp
->ks_maxused
)
ksp
->ks_maxused
= ksp
->ks_memuse
;
/*
 * Free a block of memory allocated by malloc.
 *
 * NOTE(review): garbled extraction.  The function header itself (the
 * K&R `free(addr, type)' declaration), the assignments that set `kup'
 * and `alloc', several counter updates, at least one wakeup, and most
 * closing braces are missing from this fragment.  Only comments were
 * added; reconcile against the original kern_malloc.c before
 * compiling.
 */
register struct kmembuckets
*kbp
;
register struct kmemusage
*kup
;
/* Per-type statistics record for `type'. */
register struct kmemstats
*ksp
= &kmemstats
[type
];
/* Recover the bucket and element size from the page usage record:
 * the element size is the power of two recorded in ku_indx. */
kbp
= &bucket
[kup
->ku_indx
];
size
= 1 << kup
->ku_indx
;
/* Oversized blocks were allocated whole, not carved into bucket
 * elements: give the memory and the map space back directly. */
if (size
> MAXALLOCSAVE
) {
(void) memfree(&kmempt
[alloc
], kup
->ku_pagecnt
, 0);
/* rmfree takes the 1-based click offset, hence the +CLSIZE
 * (malloc() subtracted CLSIZE after rmalloc). */
rmfree(kmemmap
, (long)kup
->ku_pagecnt
, alloc
+ CLSIZE
);
/* Wake anyone sleeping on kernel map space in malloc(). */
wakeup((caddr_t
)&wantkmemmap
);
size
= kup
->ku_pagecnt
<< PGSHIFT
;
/* Appears to test whether releasing `size' bytes moves this type
 * across its limit boundary; the wakeup on `ksp' that this should
 * guard is missing from the fragment (TODO confirm condition
 * direction against the original). */
if (ksp
->ks_memuse
+ size
>= ksp
->ks_limit
&&
ksp
->ks_memuse
< ksp
->ks_limit
)
/* All elements of this page run now free... */
if (kup
->ku_freecnt
>= kbp
->kb_elmpercl
)
/* ...and strictly more frees recorded than elements exist:
 * something was freed twice. */
if (kup
->ku_freecnt
> kbp
->kb_elmpercl
)
panic("free: multiple frees");
/* Bucket holds more free elements than its high-water mark;
 * the page-release code that should follow is missing from this
 * fragment (TODO confirm). */
else if (kbp
->kb_totalfree
> kbp
->kb_highwat
)
/* NOTE(review): duplicates the limit test above -- almost
 * certainly an artifact of dropped surrounding lines. */
if (ksp
->ks_memuse
+ size
>= ksp
->ks_limit
&&
ksp
->ks_memuse
< ksp
->ks_limit
)
/* Push the freed element onto the head of the bucket free list
 * (the head update itself is not visible in this fragment). */
*(caddr_t
*)addr
= kbp
->kb_next
;
/*
 * Initialize the kernel memory allocator.
 *
 * NOTE(review): garbled extraction.  The function header, the
 * computation of `npg', an `else' joining the two kb_elmpercl
 * assignments, and the closing braces are missing from this fragment.
 * Only comments were added; reconcile against the original
 * kern_malloc.c before compiling.
 */
/* Startup sanity checks on the configured bucket size range. */
if (!powerof2(MAXALLOCSAVE
))
panic("kmeminit: MAXALLOCSAVE not power of 2");
if (MAXALLOCSAVE
> MINALLOCSIZE
* 32768)
panic("kmeminit: MAXALLOCSAVE too big");
if (MAXALLOCSAVE
< CLBYTES
)
panic("kmeminit: MAXALLOCSAVE too small");
/* Set up the resource map over npg clicks of kernel map space;
 * offsets start at CLSIZE (1-based), matching the +/-CLSIZE
 * adjustments visible in malloc() and free(). */
rminit(kmemmap
, npg
, (long)CLSIZE
, "malloc map", npg
);
/* Elements per click for each power-of-two bucket: 1 for sizes of a
 * click or more, CLBYTES/size for smaller sizes.
 * NOTE(review): as extracted, the second assignment unconditionally
 * overwrites the first -- an `else' was presumably dropped (TODO
 * confirm against the original). */
for (indx
= 0; indx
< MINBUCKET
+ 16; indx
++) {
if (1 << indx
>= CLBYTES
)
bucket
[indx
].kb_elmpercl
= 1;
bucket
[indx
].kb_elmpercl
= CLBYTES
/ (1 << indx
);
/* Keep up to five clicks' worth of free elements per bucket. */
bucket
[indx
].kb_highwat
= 5 * bucket
[indx
].kb_elmpercl
;
/* Default per-type limit: 80% of the total kmem arena. */
for (indx
= 0; indx
< M_LAST
; indx
++)
kmemstats
[indx
].ks_limit
= npg
* CLBYTES
* 8 / 10;