/*
 * Copyright (c) 1987 Regents of the University of California.
 * %sccs.include.redist.c%
 *
 *	@(#)kern_malloc.c	7.22 (Berkeley) %G%
 */
#include "../vm/vm_param.h"
#include "../vm/vm_map.h"
#include "../vm/vm_kern.h"
/*
 * File-scope state of the kernel memory allocator.
 * NOTE(review): the original declared memname twice (a duplicate
 * definition, which is a link-time error); the duplicate is removed.
 */
struct kmembuckets bucket[MINBUCKET + 16];	/* one freelist bucket per power-of-2 size */
struct kmemstats kmemstats[M_LAST];		/* per-type usage statistics */
struct kmemusage *kmemusage;			/* per-page usage records, allocated in kmeminit() */
char *kmembase, *kmemlimit;			/* bounds of the malloc submap, set by kmem_suballoc() */
char *memname[] = INITKMEMNAMES;		/* type names used in diagnostics (e.g. tsleep wmesg) */
/*
 * Re-entrancy guard for the allocator (diagnostic): IN panics if the
 * allocator is entered while already active, OUT clears the flag.
 * NOTE(review): assumes a malloc_reentered flag defined elsewhere in
 * this file — its declaration is not visible in this chunk.
 */
#define IN { if (malloc_reentered) panic("malloc reentered");\
else malloc_reentered = 1;}
#define OUT (malloc_reentered = 0)
/*
 * Allocate a block of memory.
 *
 * NOTE(review): this copy of the function is garbled/truncated — the
 * opening brace, the declarations of va/cp/lp/rp/savedlist, the spl/IN
 * protection, the bucket-index computation (indx, kbp), the M_NOWAIT
 * handling, and the final return are all missing, and several
 * statements below are cut off mid-expression.  Restore the body from
 * the upstream Berkeley kern_malloc.c; the comments here only annotate
 * the fragments that survived.
 */
malloc(size
, type
, flags
)
register struct kmembuckets
*kbp
;
register struct kmemusage
*kup
;
long indx
, npg
, alloc
, allocsize
;
register struct kmemstats
*ksp
= &kmemstats
[type
];
/* reject a type index outside the statistics table (diagnostic) */
if (((unsigned long)type
) > M_LAST
)
panic("malloc - bogus type");
/*
 * Block while this type is at or over its memory limit; sleepers are
 * presumably woken from free() when usage drops below the limit —
 * TODO(review) confirm against upstream, the wakeup side is not
 * visible in this copy.
 */
while (ksp
->ks_memuse
>= ksp
->ks_limit
) {
if (ksp
->ks_limblocks
< 65535)
tsleep((caddr_t
)ksp
, PSWP
+2, memname
[type
], 0);
/* bucket freelist empty: allocate fresh pages from the kernel submap */
if (kbp
->kb_next
== NULL
) {
allocsize
= roundup(size
, CLBYTES
);
npg
= clrnd(btoc(allocsize
));
va
= (caddr_t
) kmem_malloc(kmem_map
, (vm_size_t
)ctob(npg
),
kbp
->kb_total
+= kbp
->kb_elmpercl
;
/* oversized requests are served whole by kmem_malloc, never carved */
if (allocsize
> MAXALLOCSAVE
) {
panic("malloc: allocation too large");
ksp
->ks_memuse
+= allocsize
;
kup
->ku_freecnt
= kbp
->kb_elmpercl
;
kbp
->kb_totalfree
+= kbp
->kb_elmpercl
;
/*
 * Just in case we blocked while allocating memory,
 * and someone else also allocated memory for this
 * bucket, don't assume the list is still empty.
 */
savedlist
= kbp
->kb_next
;
rp
= kbp
->kb_next
; /* returned while blocked in vmemall */
/* carve the new pages into allocsize chunks, linked back to front */
kbp
->kb_next
= va
+ (npg
* NBPG
) - allocsize
;
for (cp
= kbp
->kb_next
; cp
>= va
; cp
-= allocsize
) {
((caddr_t
*)cp
)[2] = (cp
> va
? cp
- allocsize
: rp
);
/* apparently a free-block poison pattern (diagnostic) — confirm upstream */
lp
[0] = lp
[1] = lp
[3] = lp
[4] = -1;
/* pop the first chunk off the bucket freelist (word [2] is the link) */
kbp
->kb_next
= ((caddr_t
*)va
)[2];
/* detect modification of a block while it sat on the freelist */
if (lp
[0] != -1 || lp
[1] != -1 || lp
[3] != -1 || lp
[4] != -1)
if (kup
->ku_indx
!= indx
)
panic("malloc: wrong bucket");
if (kup
->ku_freecnt
== 0)
panic("malloc: lost data");
/* charge the allocation to this type's statistics */
ksp
->ks_memuse
+= 1 << indx
;
if (ksp
->ks_memuse
> ksp
->ks_maxused
)
ksp
->ks_maxused
= ksp
->ks_memuse
;
/*
 * Masks used to catch unaligned frees: addrmask[i] has the low i bits
 * set, so (addr & addrmask[BUCKETINDX(size)]) != 0 means the address
 * is not aligned to its bucket's allocation size (see free()).
 * NOTE(review): the original file declared this table twice and left
 * both initializers unterminated; a single, closed definition follows.
 */
long addrmask[] = { 0x00000000,
	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
};
/*
 * Free a block of memory allocated by malloc.
 *
 * NOTE(review): garbled/truncated copy — the function header and its
 * addr/type arguments, the kup lookup (btokup), the spl/IN protection,
 * the ks_memuse decrement, the wakeup() calls, the freelist insertion
 * tail and the closing brace are all missing, and the size/alignment
 * diagnostic below appears TWICE (the second copy is a duplicate to
 * delete).  Restore from the upstream Berkeley kern_malloc.c; the
 * comments here only annotate what survived.
 */
register struct kmembuckets
*kbp
;
register struct kmemusage
*kup
;
register struct kmemstats
*ksp
= &kmemstats
[type
];
/* recover the allocation size from the page's bucket index */
size
= 1 << kup
->ku_indx
;
/* clamp the alignment mask for multi-page allocations */
if (size
> NBPG
* CLSIZE
)
alloc
= addrmask
[BUCKETINDX(NBPG
* CLSIZE
)];
alloc
= addrmask
[kup
->ku_indx
];
/* malloc never returns an address unaligned to its bucket size */
if (((u_long
)addr
& alloc
) != 0) {
printf("free: unaligned addr 0x%x, size %d, type %d, mask %d\n",
addr
, size
, type
, alloc
);
panic("free: unaligned addr");
/* NOTE(review): duplicated copy of the size/alignment check above — delete */
size
= 1 << kup
->ku_indx
;
if (size
> NBPG
* CLSIZE
)
alloc
= addrmask
[BUCKETINDX(NBPG
* CLSIZE
)];
alloc
= addrmask
[kup
->ku_indx
];
if (((u_long
)addr
& alloc
) != 0) {
printf("free: unaligned addr 0x%x, size %d, type %d, mask %d\n",
addr
, size
, type
, alloc
);
panic("free: unaligned addr");
kbp
= &bucket
[kup
->ku_indx
];
/* large allocations go straight back to the kernel submap */
if (size
> MAXALLOCSAVE
) {
kmem_free(kmem_map
, (vm_offset_t
)addr
, ctob(kup
->ku_pagecnt
));
size
= kup
->ku_pagecnt
<< PGSHIFT
;
/*
 * Wake malloc sleepers when this free takes the type from at/over
 * its limit to below it — the ks_memuse -= size that should precede
 * this test, and the wakeup() body, are missing in this copy.
 */
if (ksp
->ks_memuse
+ size
>= ksp
->ks_limit
&&
ksp
->ks_memuse
< ksp
->ks_limit
)
/* apparently a free-block poison pattern (diagnostic) — confirm upstream */
lp
[0] = lp
[1] = lp
[3] = lp
[4] = -1;
/* more free chunks on a page than it can hold means a double free */
if (kup
->ku_freecnt
>= kbp
->kb_elmpercl
)
if (kup
->ku_freecnt
> kbp
->kb_elmpercl
)
panic("free: multiple frees");
else if (kbp
->kb_totalfree
> kbp
->kb_highwat
)
if (ksp
->ks_memuse
+ size
>= ksp
->ks_limit
&&
ksp
->ks_memuse
< ksp
->ks_limit
)
/* link the block onto the bucket freelist (word [2] is the link) */
((caddr_t
*)addr
)[2] = kbp
->kb_next
;
/*
 * Initialize the kernel memory allocator.
 *
 * NOTE(review): garbled/truncated copy — the function header and
 * braces are missing, each #if sanity check below is missing its
 * #endif, and the `else` between the two kb_elmpercl assignments is
 * missing (as written the second assignment would overwrite the
 * first).  Restore from the upstream Berkeley kern_malloc.c.
 */
/* compile-time sanity checks: misconfiguration forces a syntax error */
#if ((MAXALLOCSAVE & (MAXALLOCSAVE - 1)) != 0)
ERROR
!_kmeminit
:_MAXALLOCSAVE_not_power_of_2
#if (MAXALLOCSAVE > MINALLOCSIZE * 32768)
ERROR
!_kmeminit
:_MAXALLOCSAVE_too_big
#if (MAXALLOCSAVE < CLBYTES)
ERROR
!_kmeminit
:_MAXALLOCSAVE_too_small
/* size of the kernel malloc arena, in pages */
npg
= VM_KMEM_SIZE
/ NBPG
;
/* one kmemusage record per arena page */
kmemusage
= (struct kmemusage
*) kmem_alloc(kernel_map
,
(vm_size_t
)(npg
* sizeof(struct kmemusage
)));
/* carve the malloc submap out of the kernel map; fills in kmembase/kmemlimit */
kmem_map
= kmem_suballoc(kernel_map
, (vm_offset_t
)&kmembase
,
(vm_offset_t
)&kmemlimit
, (vm_size_t
)(npg
* NBPG
), FALSE
);
/* elements per cluster and freelist high-water mark for each bucket */
for (indx
= 0; indx
< MINBUCKET
+ 16; indx
++) {
if (1 << indx
>= CLBYTES
)
bucket
[indx
].kb_elmpercl
= 1;
bucket
[indx
].kb_elmpercl
= CLBYTES
/ (1 << indx
);
bucket
[indx
].kb_highwat
= 5 * bucket
[indx
].kb_elmpercl
;
/* default per-type limit: 60% of the arena */
for (indx
= 0; indx
< M_LAST
; indx
++)
kmemstats
[indx
].ks_limit
= npg
* NBPG
* 6 / 10;