missing a cylinder group in brute force search
[unix-history] / usr / src / sys / kern / subr_rmap.c
CommitLineData
a29f7995 1/* subr_rmap.c 4.8 82/10/21 */
2694e78a
BJ
2
3#include "../h/param.h"
4#include "../h/systm.h"
5#include "../h/map.h"
6#include "../h/dir.h"
7#include "../h/user.h"
8#include "../h/proc.h"
2694e78a 9#include "../h/text.h"
39d536e6 10#include "../h/kernel.h"
2694e78a 11
b725a0ca
BJ
12/*
13 * Resource map handling routines.
14 *
15 * A resource map is an array of structures each
16 * of which describes a segment of the address space of an available
17 * resource. The segments are described by their base address and
18 * length, and sorted in address order. Each resource map has a fixed
19 * maximum number of segments allowed. Resources are allocated
20 * by taking part or all of one of the segments of the map.
21 *
22 * Returning of resources will require another segment if
23 * the returned resources are not adjacent in the address
24 * space to an existing segment. If the return of a segment
25 * would require a slot which is not available, then one of
26 * the resource map segments is discarded after a warning is printed.
27 * Returning of resources may also cause the map to collapse
28 * by coalescing two existing segments and the returned space
29 * into a single segment. In this case the resource map is
30 * made smaller by copying together to fill the resultant gap.
31 *
32 * N.B.: the current implementation uses a dense array and does
33 * not admit the value ``0'' as a legal address, since that is used
34 * as a delimiter.
35 */
36
37/*
38 * Initialize map mp to have (mapsize-2) segments
39 * and to be called ``name'', which we print if
40 * the slots become so fragmented that we lose space.
41 * The map itself is initialized with size elements free
42 * starting at addr.
43 */
44rminit(mp, size, addr, name, mapsize)
45 register struct map *mp;
a29f7995 46 long size, addr;
b725a0ca
BJ
47 char *name;
48 int mapsize;
49{
50 register struct mapent *ep = (struct mapent *)(mp+1);
51
52 mp->m_name = name;
53/* N.B.: WE ASSUME HERE THAT sizeof (struct map) == sizeof (struct mapent) */
54 /*
55 * One of the mapsize slots is taken by the map structure,
56 * segments has size 0 and addr 0, and acts as a delimiter.
57 * We insure that we never use segments past the end of
58 * the array which is given by mp->m_limit.
59 * Instead, when excess segments occur we discard some resources.
60 */
61 mp->m_limit = (struct mapent *)&mp[mapsize];
62 /*
63 * Simulate a rmfree(), but with the option to
64 * call with size 0 and addr 0 when we just want
65 * to initialize without freeing.
66 */
67 ep->m_size = size;
68 ep->m_addr = addr;
69}
70
2694e78a
BJ
71/*
72 * Allocate 'size' units from the given
b725a0ca 73 * map. Return the base of the allocated space.
2694e78a
BJ
74 * In a map, the addresses are increasing and the
75 * list is terminated by a 0 size.
b725a0ca 76 *
2694e78a 77 * Algorithm is first-fit.
b725a0ca
BJ
78 *
79 * This routine knows about the interleaving of the swapmap
80 * and handles that.
2694e78a 81 */
39d536e6 82long
b725a0ca
BJ
83rmalloc(mp, size)
84 register struct map *mp;
39d536e6 85 long size;
2694e78a 86{
b725a0ca
BJ
87 register struct mapent *ep = (struct mapent *)(mp+1);
88 register int addr;
89 register struct mapent *bp;
41888f16 90 swblk_t first, rest;
2694e78a 91
41888f16 92 if (size <= 0 || mp == swapmap && size > DMMAX)
b725a0ca
BJ
93 panic("rmalloc");
94 /*
95 * Search for a piece of the resource map which has enough
96 * free space to accomodate the request.
97 */
98 for (bp = ep; bp->m_size; bp++) {
2694e78a 99 if (bp->m_size >= size) {
b725a0ca
BJ
100 /*
101 * If allocating from swapmap,
102 * then have to respect interleaving
103 * boundaries.
104 */
41888f16
BJ
105 if (mp == swapmap &&
106 (first = DMMAX - bp->m_addr%DMMAX) < bp->m_size) {
107 if (bp->m_size - first < size)
108 continue;
b725a0ca 109 addr = bp->m_addr + first;
41888f16
BJ
110 rest = bp->m_size - first - size;
111 bp->m_size = first;
112 if (rest)
b725a0ca
BJ
113 rmfree(swapmap, rest, addr+size);
114 return (addr);
41888f16 115 }
b725a0ca
BJ
116 /*
117 * Allocate from the map.
118 * If there is no space left of the piece
119 * we allocated from, move the rest of
120 * the pieces to the left.
121 */
122 addr = bp->m_addr;
2694e78a
BJ
123 bp->m_addr += size;
124 if ((bp->m_size -= size) == 0) {
125 do {
126 bp++;
127 (bp-1)->m_addr = bp->m_addr;
128 } while ((bp-1)->m_size = bp->m_size);
129 }
b725a0ca
BJ
130 if (mp == swapmap && addr % CLSIZE)
131 panic("rmalloc swapmap");
132 return (addr);
2694e78a
BJ
133 }
134 }
b725a0ca 135 return (0);
2694e78a
BJ
136}
137
/*
 * Free the previously allocated space at addr
 * of size units into the specified map.
 * Sort addr into map and combine on
 * one or both ends if possible.
 */
rmfree(mp, size, addr)
	struct map *mp;
	long size, addr;
{
	struct mapent *firstbp;
	register struct mapent *bp;
	register int t;

	/*
	 * Both address and size must be
	 * positive, or the protocol has broken down.
	 * (Address 0 is reserved as the map delimiter.)
	 */
	if (addr <= 0 || size <= 0)
		goto badrmfree;
	/*
	 * Locate the piece of the map which starts after the
	 * returned space (or the end of the map).
	 */
	firstbp = bp = (struct mapent *)(mp + 1);
	for (; bp->m_addr <= addr && bp->m_size != 0; bp++)
		continue;
	/*
	 * If the piece on the left abuts us,
	 * then we should combine with it.
	 */
	if (bp > firstbp && (bp-1)->m_addr+(bp-1)->m_size >= addr) {
		/*
		 * Check no overlap (internal error).
		 */
		if ((bp-1)->m_addr+(bp-1)->m_size > addr)
			goto badrmfree;
		/*
		 * Add into piece on the left by increasing its size.
		 */
		(bp-1)->m_size += size;
		/*
		 * If the combined piece abuts the piece on
		 * the right now, compress it in also,
		 * by shifting the remaining pieces of the map over.
		 */
		if (bp->m_addr && addr+size >= bp->m_addr) {
			if (addr+size > bp->m_addr)
				goto badrmfree;
			(bp-1)->m_size += bp->m_size;
			/* shift the rest of the map (and the delimiter) left one slot */
			while (bp->m_size) {
				bp++;
				(bp-1)->m_addr = bp->m_addr;
				(bp-1)->m_size = bp->m_size;
			}
		}
		goto done;
	}
	/*
	 * Don't abut on the left, check for abutting on
	 * the right.
	 */
	if (addr+size >= bp->m_addr && bp->m_size) {
		if (addr+size > bp->m_addr)
			goto badrmfree;
		bp->m_addr -= size;
		bp->m_size += size;
		goto done;
	}
	/*
	 * Don't abut at all.  Make a new entry
	 * and check for map overflow.
	 * The loop ripples (addr,size) into place at bp, pushing
	 * each existing segment — and finally the delimiter —
	 * one slot to the right; it stops once the zero-size
	 * delimiter has been moved.
	 */
	do {
		t = bp->m_addr;
		bp->m_addr = addr;
		addr = t;
		t = bp->m_size;
		bp->m_size = size;
		bp++;
	} while (size = t);
	/*
	 * Segment at bp is to be the delimiter;
	 * If there is not room for it
	 * then the table is too full
	 * and we must discard something.
	 */
	if (bp+1 > mp->m_limit) {
		/*
		 * Back bp up to last available segment.
		 * which contains a segment already and must
		 * be made into the delimiter.
		 * Discard second to last entry,
		 * since it is presumably smaller than the last
		 * and move the last entry back one.
		 */
		bp--;
		printf("%s: rmap ovflo, lost [%d,%d)\n", mp->m_name,
		    (bp-1)->m_addr, (bp-1)->m_addr+(bp-1)->m_size);
		bp[-1] = bp[0];
		bp[0].m_size = bp[0].m_addr = 0;
	}
done:
	/*
	 * THIS IS RIDICULOUS... IT DOESN'T BELONG HERE!
	 * Anyone sleeping for kernelmap space is woken now
	 * that something has been freed.
	 */
	if ((mp == kernelmap) && kmapwnt) {
		kmapwnt = 0;
		wakeup((caddr_t)kernelmap);
	}
	return;
badrmfree:
	panic("bad rmfree");
}
1c1f6ecf
BF
252
253/*
254 * Allocate 'size' units from the given map, starting at address 'addr'.
255 * Return 'addr' if successful, 0 if not.
256 * This may cause the creation or destruction of a resource map segment.
257 *
258 * This routine will return failure status if there is not enough room
259 * for a required additional map segment.
260 *
261 * An attempt to use this on 'swapmap' will result in
262 * a failure return. This is due mainly to laziness and could be fixed
263 * to do the right thing, although it probably will never be used.
264 */
265rmget(mp, size, addr)
266 register struct map *mp;
267{
268 register struct mapent *ep = (struct mapent *)(mp+1);
269 register struct mapent *bp, *bp2;
270
271 if (size <= 0)
272 panic("rmget");
273 if (mp == swapmap)
274 return (0);
275 /*
276 * Look for a map segment containing the requested address.
277 * If none found, return failure.
278 */
279 for (bp = ep; bp->m_size; bp++)
280 if (bp->m_addr <= addr && bp->m_addr + bp->m_size > addr)
281 break;
282 if (bp->m_size == 0)
283 return (0);
284
285 /*
286 * If segment is too small, return failure.
287 * If big enough, allocate the block, compressing or expanding
288 * the map as necessary.
289 */
290 if (bp->m_addr + bp->m_size < addr + size)
291 return (0);
292 if (bp->m_addr == addr)
293 if (bp->m_addr + bp->m_size == addr + size) {
294 /*
295 * Allocate entire segment and compress map
296 */
297 bp2 = bp;
298 while (bp2->m_size) {
299 bp2++;
300 (bp2-1)->m_addr = bp2->m_addr;
301 (bp2-1)->m_size = bp2->m_size;
302 }
303 } else {
304 /*
305 * Allocate first part of segment
306 */
307 bp->m_addr += size;
308 bp->m_size -= size;
309 }
310 else
311 if (bp->m_addr + bp->m_size == addr + size) {
312 /*
313 * Allocate last part of segment
314 */
315 bp->m_size -= size;
316 } else {
317 /*
318 * Allocate from middle of segment, but only
319 * if table can be expanded.
320 */
321 for (bp2=bp; bp2->m_size; bp2++)
322 ;
323 if (bp2 == mp->m_limit)
324 return (0);
325 while (bp2 > bp) {
326 (bp2+1)->m_addr = bp2->m_addr;
327 (bp2+1)->m_size = bp2->m_size;
328 bp2--;
329 }
330 (bp+1)->m_addr = addr + size;
331 (bp+1)->m_size =
332 bp->m_addr + bp->m_size - (addr + size);
333 bp->m_size = addr - bp->m_addr;
334 }
335 return (addr);
336}