merge in vnode changes
[unix-history] / usr / src / sys / kern / uipc_mbuf.c
... / ...
CommitLineData
1/*
2 * All rights reserved.
3 *
4 * Redistribution and use in source and binary forms are permitted
5 * provided that the above copyright notice and this paragraph are
6 * duplicated in all such forms and that any documentation,
7 * advertising materials, and other materials related to such
8 * distribution and use acknowledge that the software was developed
9 * by the University of California, Berkeley. The name of the
10 * University may not be used to endorse or promote products derived
11 * from this software without specific prior written permission.
12 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
13 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
14 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
15 *
16 * @(#)uipc_mbuf.c 7.4.1.3 (Berkeley) %G%
17 */
18
19#include "../machine/pte.h"
20
21#include "param.h"
22#include "dir.h"
23#include "user.h"
24#include "proc.h"
25#include "cmap.h"
26#include "map.h"
27#include "mbuf.h"
28#include "vm.h"
29#include "kernel.h"
30#include "syslog.h"
31#include "domain.h"
32#include "protosw.h"
33
34mbinit()
35{
36 int s;
37
38#if CLBYTES < 4096
39#define NCL_INIT (4096/CLBYTES)
40#else
41#define NCL_INIT 1
42#endif
43 s = splimp();
44 if (m_clalloc(NCL_INIT, MPG_MBUFS, M_DONTWAIT) == 0)
45 goto bad;
46 if (m_clalloc(NCL_INIT, MPG_CLUSTERS, M_DONTWAIT) == 0)
47 goto bad;
48 splx(s);
49 return;
50bad:
51 panic("mbinit");
52}
53
54/*
55 * Must be called at splimp.
56 */
57/* ARGSUSED */
58caddr_t
59m_clalloc(ncl, how, canwait)
60 register int ncl;
61 int how;
62{
63 int npg, mbx;
64 register struct mbuf *m;
65 register int i;
66 static int logged;
67
68 npg = ncl * CLSIZE;
69 mbx = rmalloc(mbmap, (long)npg);
70 if (mbx == 0) {
71 if (logged == 0) {
72 logged++;
73 log(LOG_ERR, "mbuf map full\n");
74 }
75 return (0);
76 }
77 m = cltom(mbx * NBPG / MCLBYTES);
78 if (memall(&Mbmap[mbx], npg, proc, CSYS) == 0) {
79 rmfree(mbmap, (long)npg, (long)mbx);
80 return (0);
81 }
82 vmaccess(&Mbmap[mbx], (caddr_t)m, npg);
83 switch (how) {
84
85 case MPG_CLUSTERS:
86 ncl = ncl * CLBYTES / MCLBYTES;
87 for (i = 0; i < ncl; i++) {
88 m->m_off = 0;
89 m->m_next = mclfree;
90 mclfree = m;
91 m += MCLBYTES / sizeof (*m);
92 mbstat.m_clfree++;
93 }
94 mbstat.m_clusters += ncl;
95 break;
96
97 case MPG_MBUFS:
98 for (i = ncl * CLBYTES / sizeof (*m); i > 0; i--) {
99 m->m_off = 0;
100 m->m_type = MT_DATA;
101 mbstat.m_mtypes[MT_DATA]++;
102 mbstat.m_mbufs++;
103 (void) m_free(m);
104 m++;
105 }
106 break;
107 }
108 return ((caddr_t)m);
109}
110
111/*
112 * Must be called at splimp.
113 */
114m_expand(canwait)
115 int canwait;
116{
117 register struct domain *dp;
118 register struct protosw *pr;
119 int tries;
120
121 for (tries = 0;; ) {
122 if (m_clalloc(1, MPG_MBUFS, canwait))
123 return (1);
124 if (canwait == 0 || tries++)
125 return (0);
126
127 /* ask protocols to free space */
128 for (dp = domains; dp; dp = dp->dom_next)
129 for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW;
130 pr++)
131 if (pr->pr_drain)
132 (*pr->pr_drain)();
133 mbstat.m_drain++;
134 }
135}
136
137/* NEED SOME WAY TO RELEASE SPACE */
138
139/*
140 * Space allocation routines.
141 * These are also available as macros
142 * for critical paths.
143 */
144struct mbuf *
145m_get(canwait, type)
146 int canwait, type;
147{
148 register struct mbuf *m;
149
150 MGET(m, canwait, type);
151 return (m);
152}
153
154struct mbuf *
155m_getclr(canwait, type)
156 int canwait, type;
157{
158 register struct mbuf *m;
159
160 MGET(m, canwait, type);
161 if (m == 0)
162 return (0);
163 bzero(mtod(m, caddr_t), MLEN);
164 return (m);
165}
166
/*
 * Free one mbuf; function form of MFREE.
 * Returns the next mbuf on the chain (possibly null).
 */
struct mbuf *
m_free(m)
	struct mbuf *m;
{
	register struct mbuf *next;

	MFREE(m, next);
	return (next);
}
176
177/*
178 * Get more mbufs; called from MGET macro if mfree list is empty.
179 * Must be called at splimp.
180 */
181/*ARGSUSED*/
182struct mbuf *
183m_more(canwait, type)
184 int canwait, type;
185{
186 register struct mbuf *m;
187
188 while (m_expand(canwait) == 0) {
189 if (canwait == M_WAIT) {
190 mbstat.m_wait++;
191 m_want++;
192 sleep((caddr_t)&mfree, PZERO - 1);
193 if (mfree)
194 break;
195 } else {
196 mbstat.m_drops++;
197 return (NULL);
198 }
199 }
200#define m_more(x,y) (panic("m_more"), (struct mbuf *)0)
201 MGET(m, canwait, type);
202#undef m_more
203 return (m);
204}
205
206m_freem(m)
207 register struct mbuf *m;
208{
209 register struct mbuf *n;
210 register int s;
211
212 if (m == NULL)
213 return;
214 s = splimp();
215 do {
216 MFREE(m, n);
217 } while (m = n);
218 splx(s);
219}
220
221/*
222 * Mbuffer utility routines.
223 */
224
225/*
226/*
227 * Make a copy of an mbuf chain starting "off" bytes from the beginning,
228 * continuing for "len" bytes. If len is M_COPYALL, copy to end of mbuf.
229 * Should get M_WAIT/M_DONTWAIT from caller.
230 */
struct mbuf *
m_copy(m, off, len)
	register struct mbuf *m;
	int off;
	register int len;
{
	register struct mbuf *n, **np;
	struct mbuf *top, *p;

	if (len == 0)
		return (0);
	if (off < 0 || len < 0)
		panic("m_copy");
	/* skip forward to the mbuf containing the starting offset */
	while (off > 0) {
		if (m == 0)
			panic("m_copy");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			if (len != M_COPYALL)
				panic("m_copy");	/* chain shorter than requested */
			break;
		}
		MGET(n, M_DONTWAIT, m->m_type);
		*np = n;
		if (n == 0)
			goto nospace;
		n->m_len = MIN(len, m->m_len - off);
		if (m->m_off > MMAXOFF) {
			/*
			 * Source data lives outside the mbuf (a cluster):
			 * share it by pointing at the same storage and
			 * bumping its reference count instead of copying.
			 */
			p = mtod(m, struct mbuf *);
			n->m_off = ((int)p - (int)n) + off;
			mclrefcnt[mtocl(p)]++;
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;	/* offset applies only to the first mbuf */
		m = m->m_next;
		np = &n->m_next;
	}
	return (top);
nospace:
	/* allocation failed part way: release the partial copy */
	m_freem(top);
	return (0);
}
283
284/*
285 * Copy data from an mbuf chain starting "off" bytes from the beginning,
286 * continuing for "len" bytes, into the indicated buffer.
287 */
288m_copydata(m, off, len, cp)
289 register struct mbuf *m;
290 register int off;
291 register int len;
292 caddr_t cp;
293{
294 register unsigned count;
295
296 if (off < 0 || len < 0)
297 panic("m_copydata");
298 while (off > 0) {
299 if (m == 0)
300 panic("m_copydata");
301 if (off < m->m_len)
302 break;
303 off -= m->m_len;
304 m = m->m_next;
305 }
306 while (len > 0) {
307 if (m == 0)
308 panic("m_copydata");
309 count = MIN(m->m_len - off, len);
310 bcopy(mtod(m, caddr_t) + off, cp, count);
311 len -= count;
312 cp += count;
313 off = 0;
314 m = m->m_next;
315 }
316}
317
318m_cat(m, n)
319 register struct mbuf *m, *n;
320{
321 while (m->m_next)
322 m = m->m_next;
323 while (n) {
324 if (m->m_off >= MMAXOFF ||
325 m->m_off + m->m_len + n->m_len > MMAXOFF) {
326 /* just join the two chains */
327 m->m_next = n;
328 return;
329 }
330 /* splat the data from one into the other */
331 bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
332 (u_int)n->m_len);
333 m->m_len += n->m_len;
334 n = m_free(n);
335 }
336}
337
/*
 * Trim len bytes of data from the chain: from the head if len is
 * positive, from the tail if negative.  Emptied mbufs are left on
 * the chain with m_len zero rather than freed.
 */
m_adj(mp, len)
	struct mbuf *mp;
	register int len;
{
	register struct mbuf *m;
	register count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/* trim from head */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				/* this whole mbuf is trimmed away */
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				/* partial trim: advance past the cut data */
				m->m_len -= len;
				m->m_off += len;
				break;
			}
		}
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			/* trim fits entirely within the last mbuf */
			m->m_len = len;
			return;
		}
		count -= len;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		for (m = mp; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		/* zero out anything past the new end of data */
		while (m = m->m_next)
			m->m_len = 0;
	}
}
396
397/*
398 * Rearange an mbuf chain so that len bytes are contiguous
399 * and in the data area of an mbuf (so that mtod and dtom
400 * will work for a structure of size len). Returns the resulting
401 * mbuf chain on success, frees it and returns null on failure.
402 * If there is room, it will add up to MPULL_EXTRA bytes to the
403 * contiguous region in an attempt to avoid being called next time.
404 */
405struct mbuf *
406m_pullup(n, len)
407 register struct mbuf *n;
408 int len;
409{
410 register struct mbuf *m;
411 register int count;
412 int space;
413
414 if (n->m_off + len <= MMAXOFF && n->m_next) {
415 m = n;
416 n = n->m_next;
417 len -= m->m_len;
418 } else {
419 if (len > MLEN)
420 goto bad;
421 MGET(m, M_DONTWAIT, n->m_type);
422 if (m == 0)
423 goto bad;
424 m->m_len = 0;
425 }
426 space = MMAXOFF - m->m_off;
427 do {
428 count = MIN(MIN(space - m->m_len, len + MPULL_EXTRA), n->m_len);
429 bcopy(mtod(n, caddr_t), mtod(m, caddr_t)+m->m_len,
430 (unsigned)count);
431 len -= count;
432 m->m_len += count;
433 n->m_len -= count;
434 if (n->m_len)
435 n->m_off += count;
436 else
437 n = m_free(n);
438 } while (len > 0 && n);
439 if (len > 0) {
440 (void) m_free(m);
441 goto bad;
442 }
443 m->m_next = n;
444 return (m);
445bad:
446 m_freem(n);
447 return (0);
448}