Removed all patch kit headers, sccsid and rcsid strings, put $Id$ in, some
[unix-history] / sys / kern / uipc_mbuf.c
CommitLineData
15637ed4
RG
1/*
2 * Copyright (c) 1982, 1986, 1988, 1991 Regents of the University of California.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by the University of
16 * California, Berkeley and its contributors.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * @(#)uipc_mbuf.c 7.19 (Berkeley) 4/20/91
34 *
35 * PATCHES MAGIC LEVEL PATCH THAT GOT US HERE
36 * -------------------- ----- ----------------------
37 * CURRENT PATCH LEVEL: 1 00009
38 * -------------------- ----- ----------------------
39 *
40 * 31 Aug 92 Wolfgang Solfrank Fixed mbuf allocation code
41 */
42
43#include "param.h"
dd18dc33 44#include "systm.h"
15637ed4
RG
45#include "proc.h"
46#include "malloc.h"
47#define MBTYPES
48#include "mbuf.h"
49#include "kernel.h"
50#include "syslog.h"
51#include "domain.h"
52#include "protosw.h"
53#include "vm/vm.h"
54
extern vm_map_t mb_map;	/* kernel submap clusters are allocated from (see m_clalloc) */
struct mbuf *mbutl;	/* base of the mbuf region; presumably used by mtod/mtocl macros — confirm in mbuf.h */
char *mclrefcnt;	/* per-cluster reference counts, indexed by mtocl() (see m_copym) */
58
59mbinit()
60{
61 int s;
62
63#if CLBYTES < 4096
64#define NCL_INIT (4096/CLBYTES)
65#else
66#define NCL_INIT 1
67#endif
68 s = splimp();
69 if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
70 goto bad;
71 splx(s);
72 return;
73bad:
74 panic("mbinit");
75}
76
77/*
78 * Allocate some number of mbuf clusters
79 * and place on cluster free list.
80 * Must be called at splimp.
81 */
82/* ARGSUSED */
83m_clalloc(ncl, how) /* 31 Aug 92*/
84 register int ncl;
85{
86 int npg, mbx;
87 register caddr_t p;
88 register int i;
89 static int logged;
90
91 npg = ncl * CLSIZE;
92 /* 31 Aug 92*/
93 p = (caddr_t)kmem_malloc(mb_map, ctob(npg), !(how&M_DONTWAIT));
94 if (p == NULL) {
95 if (logged == 0) {
96 logged++;
97 log(LOG_ERR, "mb_map full\n");
98 }
99 return (0);
100 }
101 ncl = ncl * CLBYTES / MCLBYTES;
102 for (i = 0; i < ncl; i++) {
103 ((union mcluster *)p)->mcl_next = mclfree;
104 mclfree = (union mcluster *)p;
105 p += MCLBYTES;
106 mbstat.m_clfree++;
107 }
108 mbstat.m_clusters += ncl;
109 return (1);
110}
111
/*
 * When MGET fails, ask protocols to free space when short of memory,
 * then re-attempt to allocate an mbuf.
 */
struct mbuf *
m_retry(i, t)
	int i, t;
{
	register struct mbuf *m;

	/* Ask every protocol to release whatever mbufs it can spare. */
	m_reclaim();
	/*
	 * MGET's failure path normally expands to a call to m_retry();
	 * temporarily define m_retry to a null mbuf so this particular
	 * expansion cannot recurse back into us if memory is still short.
	 */
#define m_retry(i, t)	(struct mbuf *)0
	MGET(m, i, t);
#undef m_retry
	return (m);
}
128
/*
 * As above; retry an MGETHDR.
 */
struct mbuf *
m_retryhdr(i, t)
	int i, t;
{
	register struct mbuf *m;

	m_reclaim();
	/*
	 * Temporarily define m_retryhdr away so the MGETHDR expansion
	 * below cannot recurse into this function if memory is still short.
	 */
#define m_retryhdr(i, t) (struct mbuf *)0
	MGETHDR(m, i, t);
#undef m_retryhdr
	return (m);
}
144
145m_reclaim()
146{
147 register struct domain *dp;
148 register struct protosw *pr;
149 int s = splimp();
150
151 for (dp = domains; dp; dp = dp->dom_next)
152 for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
153 if (pr->pr_drain)
154 (*pr->pr_drain)();
155 splx(s);
156 mbstat.m_drain++;
157}
158
159/*
160 * Space allocation routines.
161 * These are also available as macros
162 * for critical paths.
163 */
/*
 * Function wrapper around the MGET macro, for callers that
 * cannot (or prefer not to) use the macro directly.
 */
struct mbuf *
m_get(how, type)	/* 31 Aug 92*/
	int how, type;
{
	register struct mbuf *mb;

	MGET(mb, how, type);
	return (mb);
}
173
/*
 * Function wrapper around the MGETHDR macro: allocate an mbuf
 * that will carry a packet header.
 */
struct mbuf *
m_gethdr(how, type)	/* 31 Aug 92*/
	int how, type;
{
	register struct mbuf *mb;

	MGETHDR(mb, how, type);
	return (mb);
}
183
184struct mbuf *
185m_getclr(how, type) /* 31 Aug 92*/
186 int how, type;
187{
188 register struct mbuf *m;
189
190 MGET(m, how, type);
191 if (m == 0)
192 return (0);
193 bzero(mtod(m, caddr_t), MLEN);
194 return (m);
195}
196
/*
 * Free a single mbuf via the MFREE macro and hand back its
 * successor in the chain (possibly null).
 */
struct mbuf *
m_free(m)
	struct mbuf *m;
{
	register struct mbuf *successor;

	MFREE(m, successor);
	return (successor);
}
206
207m_freem(m)
208 register struct mbuf *m;
209{
210 register struct mbuf *n;
211
212 if (m == NULL)
213 return;
214 do {
215 MFREE(m, n);
216 } while (m = n);
217}
218
219/*
220 * Mbuffer utility routines.
221 */
222
223/*
224 * Lesser-used path for M_PREPEND:
225 * allocate new mbuf to prepend to chain,
226 * copy junk along.
227 */
228struct mbuf *
229m_prepend(m, len, how)
230 register struct mbuf *m;
231 int len, how;
232{
233 struct mbuf *mn;
234
235 MGET(mn, how, m->m_type);
236 if (mn == (struct mbuf *)NULL) {
237 m_freem(m);
238 return ((struct mbuf *)NULL);
239 }
240 if (m->m_flags & M_PKTHDR) {
241 M_COPY_PKTHDR(mn, m);
242 m->m_flags &= ~M_PKTHDR;
243 }
244 mn->m_next = m;
245 m = mn;
246 if (len < MHLEN)
247 MH_ALIGN(m, len);
248 m->m_len = len;
249 return (m);
250}
251
/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
 */
int MCFail;	/* count of failed m_copym() attempts (diagnostic) */

struct mbuf *
m_copym(m, off0, len, wait)
	register struct mbuf *m;
	int off0, wait;
	register int len;
{
	register struct mbuf *n, **np;
	register int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	if (off < 0 || len < 0)
		panic("m_copym");
	/* Copy the packet header only when copying from the very start. */
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	/* Skip over mbufs that lie wholly before the starting offset. */
	while (off > 0) {
		if (m == 0)
			panic("m_copym");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			/* Running off the end is only legal for M_COPYALL. */
			if (len != M_COPYALL)
				panic("m_copym");
			break;
		}
		MGET(n, wait, m->m_type);
		*np = n;
		if (n == 0)
			goto nospace;
		if (copyhdr) {
			M_COPY_PKTHDR(n, m);
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = MIN(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			/*
			 * Cluster data is shared by reference: bump the
			 * cluster's refcount instead of copying the bytes.
			 */
			n->m_data = m->m_data + off;
			mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;	/* offset only applies to the first source mbuf */
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == 0)
		MCFail++;
	return (top);
nospace:
	m_freem(top);
	MCFail++;
	return (0);
}
325
326/*
327 * Copy data from an mbuf chain starting "off" bytes from the beginning,
328 * continuing for "len" bytes, into the indicated buffer.
329 */
330m_copydata(m, off, len, cp)
331 register struct mbuf *m;
332 register int off;
333 register int len;
334 caddr_t cp;
335{
336 register unsigned count;
337
338 if (off < 0 || len < 0)
339 panic("m_copydata");
340 while (off > 0) {
341 if (m == 0)
342 panic("m_copydata");
343 if (off < m->m_len)
344 break;
345 off -= m->m_len;
346 m = m->m_next;
347 }
348 while (len > 0) {
349 if (m == 0)
350 panic("m_copydata");
351 count = MIN(m->m_len - off, len);
352 bcopy(mtod(m, caddr_t) + off, cp, count);
353 len -= count;
354 cp += count;
355 off = 0;
356 m = m->m_next;
357 }
358}
359
360/*
361 * Concatenate mbuf chain n to m.
362 * Both chains must be of the same type (e.g. MT_DATA).
363 * Any m_pkthdr is not updated.
364 */
365m_cat(m, n)
366 register struct mbuf *m, *n;
367{
368 while (m->m_next)
369 m = m->m_next;
370 while (n) {
371 if (m->m_flags & M_EXT ||
372 m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
373 /* just join the two chains */
374 m->m_next = n;
375 return;
376 }
377 /* splat the data from one into the other */
378 bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
379 (u_int)n->m_len);
380 m->m_len += n->m_len;
381 n = m_free(n);
382 }
383}
384
385m_adj(mp, req_len)
386 struct mbuf *mp;
387{
388 register int len = req_len;
389 register struct mbuf *m;
390 register count;
391
392 if ((m = mp) == NULL)
393 return;
394 if (len >= 0) {
395 /*
396 * Trim from head.
397 */
398 while (m != NULL && len > 0) {
399 if (m->m_len <= len) {
400 len -= m->m_len;
401 m->m_len = 0;
402 m = m->m_next;
403 } else {
404 m->m_len -= len;
405 m->m_data += len;
406 len = 0;
407 }
408 }
409 m = mp;
410 if (mp->m_flags & M_PKTHDR)
411 m->m_pkthdr.len -= (req_len - len);
412 } else {
413 /*
414 * Trim from tail. Scan the mbuf chain,
415 * calculating its length and finding the last mbuf.
416 * If the adjustment only affects this mbuf, then just
417 * adjust and return. Otherwise, rescan and truncate
418 * after the remaining size.
419 */
420 len = -len;
421 count = 0;
422 for (;;) {
423 count += m->m_len;
424 if (m->m_next == (struct mbuf *)0)
425 break;
426 m = m->m_next;
427 }
428 if (m->m_len >= len) {
429 m->m_len -= len;
430 if ((mp = m)->m_flags & M_PKTHDR)
431 m->m_pkthdr.len -= len;
432 return;
433 }
434 count -= len;
435 if (count < 0)
436 count = 0;
437 /*
438 * Correct length for chain is "count".
439 * Find the mbuf with last data, adjust its length,
440 * and toss data from remaining mbufs on chain.
441 */
442 m = mp;
443 if (m->m_flags & M_PKTHDR)
444 m->m_pkthdr.len = count;
445 for (; m; m = m->m_next) {
446 if (m->m_len >= count) {
447 m->m_len = count;
448 break;
449 }
450 count -= m->m_len;
451 }
452 while (m = m->m_next)
453 m->m_len = 0;
454 }
455}
456
/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
int MPFail;	/* count of m_pullup() failures (diagnostic) */

struct mbuf *
m_pullup(n, len)
	register struct mbuf *n;
	int len;
{
	register struct mbuf *m;
	register int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);	/* already contiguous */
		m = n;
		n = n->m_next;
		len -= m->m_len;	/* still need this many more bytes */
	} else {
		if (len > MHLEN)
			goto bad;	/* can never fit in one mbuf */
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR) {
			/* The new front mbuf inherits the packet header. */
			M_COPY_PKTHDR(m, n);
			n->m_flags &= ~M_PKTHDR;
		}
	}
	/* Free space remaining at the end of m's data area. */
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		/*
		 * Pull at least len bytes, opportunistically up to
		 * max_protohdr, but never more than fits in m or than
		 * n currently holds.
		 */
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);	/* n drained; free it and advance */
	} while (len > 0 && n);
	if (len > 0) {
		/* Chain too short to supply len contiguous bytes. */
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	MPFail++;
	return (0);
}