BSD 4.4-Lite1 development
[unix-history] usr/src/contrib/rc-1.4/nalloc.c
/* nalloc.c: a simple single-arena allocator for command-line-lifetime allocation */
#include "rc.h"

static struct Block {
	size_t used, size;
	char *mem;
	Block *n;
} *fl, *ul;

/* alignto() works only with power of 2 blocks and assumes 2's complement arithmetic */
#define alignto(m, n) ((m + n - 1) & ~(n - 1))
#define BLOCKSIZE ((size_t) 4096)

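/*
   A hypothetical self-test sketch (kept under #if 0, not compiled):
   alignto() rounds its first argument up to the next multiple of a
   power-of-two boundary, e.g. alignto(5, 8) == 8 and
   alignto(4097, BLOCKSIZE) == 8192.
*/
#if 0
#include <assert.h>
static void alignto_selftest(void) {
	assert(alignto(5, (size_t) 8) == 8);
	assert(alignto(8, (size_t) 8) == 8);
	assert(alignto(0, (size_t) 8) == 0);
	assert(alignto(4097, BLOCKSIZE) == 8192);
}
#endif
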
/* Allocate a block from the free list or malloc one if none in the fl fit */

static void getblock(size_t n) {
	Block *r, *p;
	for (r = fl, p = NULL; r != NULL; p = r, r = r->n)
		if (n <= r->size)
			break; /* look for a block which fits the request */
	if (r != NULL) { /* if one is found, take it off the free list */
		if (p != NULL)
			p->n = r->n;
		else
			fl = r->n;
	} else { /* else allocate a new block */
		r = enew(Block);
		r->mem = ealloc(r->size = alignto(n, BLOCKSIZE));
	}
	r->used = 0;
	r->n = ul;
	ul = r;
}

/*
   A fast single-arena allocator. Looks at the current block, and if
   there is not enough room, it goes to getblock() for more. "ul"
   stands for "used list", and the head of the list is the current
   block. "ulp" is a register cache for "ul"; this routine is hacked
   for speed. (sigh, optimizing RISC compilers still can't cache the
   address of a global in a register)
*/

extern void *nalloc(size_t n) {
	size_t base;
	Block *ulp;
	n = alignto(n, sizeof (ALIGN_T));
	ulp = ul;
	if (ulp != NULL && n + (base = ulp->used) < ulp->size) {
		ulp->used = base + n;
		return &ulp->mem[base];
	} else {
		getblock(n); /* assert(ul->used == 0) */
		(ulp = ul)->used = n;
		return &ulp->mem[0];
	}
}

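/*
   A hypothetical usage sketch (kept under #if 0, not compiled): nalloc()
   is meant for data that lives exactly as long as the current command
   line; everything is released by one nfree() call afterwards. The
   helper name ncopy_example is made up, and strlen()/memcpy() are
   assumed to be declared (via <string.h>).
*/
#if 0
#include <string.h>
static char *ncopy_example(const char *s) {
	size_t len = strlen(s) + 1;
	char *copy = nalloc(len);	/* arena memory: no per-object free */
	memcpy(copy, s, len);
	return copy;
}
/* ...when the command line is done, a single nfree() reclaims it all */
#endif
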
/*
   Frees memory from nalloc space by putting it on the free list.
   Returns free blocks to the system, retaining at least MAXMEM bytes
   worth of blocks for nalloc.
*/

#define MAXMEM 500000

extern void nfree() {
	size_t count;
	Block *r;
	if (ul == NULL)
		return;
	for (r = ul; r->n != NULL; r = r->n)
		; /* get to end of used list */
	r->n = fl; /* tack free list onto it */
	fl = ul; /* and make it the free list */
	ul = NULL; /* finally, zero out the used list */
	for (r = fl, count = r->size; r->n != NULL; r = r->n, count += r->size) {
		if (count >= MAXMEM) {
			Block *tmp = r;
			r = r->n;
			tmp->n = NULL; /* terminate the free list */
			while (r != NULL) { /* free memory off the tail of the free list */
				tmp = r->n;
				efree(r->mem);
				efree(r);
				r = tmp;
			}
			return;
		}
	}
}

/*
   "Allocates" a new arena by zeroing out the old one. Up to the
   calling routine to keep the old value of the block around.
*/

extern Block *newblock() {
	Block *old = ul;
	ul = NULL;
	return old;
}

/* "Restores" an arena to its saved value. */

extern void restoreblock(Block *old) {
	nfree();
	ul = old;
}

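/*
   A hypothetical usage sketch (kept under #if 0, not compiled):
   newblock() and restoreblock() let a caller run some work in a
   temporary arena and then discard that arena without disturbing
   allocations made earlier. The name with_temporary_arena is made up.
*/
#if 0
static void with_temporary_arena(void (*work)(void)) {
	Block *saved = newblock();	/* detach the current used list */
	work();				/* nalloc() calls here fill a fresh arena */
	restoreblock(saved);		/* free the temporary arena, reattach the old one */
}
#endif
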
/* generic memory allocation functions */

extern void *ealloc(size_t n) {
	extern void *malloc(size_t);
	void *p = malloc(n);
	if (p == NULL) {
		uerror("malloc");
		rc_exit(1);
	}
	return p;
}

extern void *erealloc(void *p, size_t n) {
	extern void *realloc(void *, size_t);
	if ((p = realloc(p, n)) == NULL) {
		uerror("realloc");
		rc_exit(1);
	}
	return p;
}

extern void efree(void *p) {
	extern void free(void *);
	if (p != NULL)
		free(p);
}
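
/*
   A hypothetical usage sketch (kept under #if 0, not compiled): since
   ealloc() and erealloc() never return NULL (they report via uerror()
   and exit), callers can grow a buffer without checking each call.
   The names grow_example and cap are made up.
*/
#if 0
static char *grow_example(char *buf, size_t *cap, size_t need) {
	if (need <= *cap)
		return buf;			/* already big enough */
	while (*cap < need)
		*cap = *cap ? *cap * 2 : 16;	/* double (or start at 16) until it fits */
	return erealloc(buf, *cap);		/* exits through rc_exit() on failure */
}
#endif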