X-Git-Url: https://git.subgeniuskitty.com/unix-history/.git/blobdiff_plain/e495e1ccd06185fbec39733a5598dd424ec9e7f5..a937f8567ba9375553a85507f18042a6faaffaab:/usr/src/sys/kern/uipc_mbuf.c

diff --git a/usr/src/sys/kern/uipc_mbuf.c b/usr/src/sys/kern/uipc_mbuf.c
index 1d7dc646ed..d301a648ec 100644
--- a/usr/src/sys/kern/uipc_mbuf.c
+++ b/usr/src/sys/kern/uipc_mbuf.c
@@ -1,125 +1,140 @@
-/*	uipc_mbuf.c	1.30	82/03/09	*/
-
-#include "../h/param.h"
-#include "../h/dir.h"
-#include "../h/user.h"
-#include "../h/proc.h"
-#include "../h/pte.h"
-#include "../h/cmap.h"
-#include "../h/map.h"
-#include "../h/mbuf.h"
-#include "../net/in_systm.h"		/* XXX */
-#include "../h/vm.h"
+/*
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms are permitted
+ * provided that the above copyright notice and this paragraph are
+ * duplicated in all such forms and that any documentation,
+ * advertising materials, and other materials related to such
+ * distribution and use acknowledge that the software was developed
+ * by the University of California, Berkeley. The name of the
+ * University may not be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ *	@(#)uipc_mbuf.c	7.4.1.3 (Berkeley) %G%
+ */
+
+#include "../machine/pte.h"
+
+#include "param.h"
+#include "dir.h"
+#include "user.h"
+#include "proc.h"
+#include "cmap.h"
+#include "map.h"
+#include "mbuf.h"
+#include "vm.h"
+#include "kernel.h"
+#include "syslog.h"
+#include "domain.h"
+#include "protosw.h"
 
 mbinit()
 {
+	int s;
 
-COUNT(MBINIT);
-	if (m_reserve(32) == 0)
-		goto bad;
-	if (m_clalloc(4, MPG_MBUFS) == 0)
+#if CLBYTES < 4096
+#define NCL_INIT	(4096/CLBYTES)
+#else
+#define NCL_INIT	1
+#endif
+	s = splimp();
+	if (m_clalloc(NCL_INIT, MPG_MBUFS, M_DONTWAIT) == 0)
 		goto bad;
-	if (m_clalloc(32, MPG_CLUSTERS) == 0)
+	if (m_clalloc(NCL_INIT, MPG_CLUSTERS, M_DONTWAIT) == 0)
 		goto bad;
+	splx(s);
 	return;
 bad:
 	panic("mbinit");
 }
 
+/*
+ * Must be called at splimp.
+ */
+/* ARGSUSED */
 caddr_t
-m_clalloc(ncl, how)
+m_clalloc(ncl, how, canwait)
 	register int ncl;
 	int how;
 {
 	int npg, mbx;
 	register struct mbuf *m;
 	register int i;
-	int s;
+	static int logged;
 
-COUNT(M_CLALLOC);
 	npg = ncl * CLSIZE;
-	s = splimp();		/* careful: rmalloc isn't reentrant */
-	mbx = rmalloc(mbmap, npg);
-	splx(s);
-	if (mbx == 0)
+	mbx = rmalloc(mbmap, (long)npg);
+	if (mbx == 0) {
+		if (logged == 0) {
+			logged++;
+			log(LOG_ERR, "mbuf map full\n");
+		}
 		return (0);
-	m = cltom(mbx / CLSIZE);
-	if (memall(&Mbmap[mbx], ncl * CLSIZE, proc, CSYS) == 0)
+	}
+	m = cltom(mbx * NBPG / MCLBYTES);
+	if (memall(&Mbmap[mbx], npg, proc, CSYS) == 0) {
+		rmfree(mbmap, (long)npg, (long)mbx);
 		return (0);
+	}
 	vmaccess(&Mbmap[mbx], (caddr_t)m, npg);
 	switch (how) {
 
 	case MPG_CLUSTERS:
-		s = splimp();
+		ncl = ncl * CLBYTES / MCLBYTES;
 		for (i = 0; i < ncl; i++) {
 			m->m_off = 0;
 			m->m_next = mclfree;
 			mclfree = m;
-			m += CLBYTES / sizeof (*m);
-			nmclfree++;
+			m += MCLBYTES / sizeof (*m);
+			mbstat.m_clfree++;
 		}
 		mbstat.m_clusters += ncl;
-		splx(s);
 		break;
 
 	case MPG_MBUFS:
 		for (i = ncl * CLBYTES / sizeof (*m); i > 0; i--) {
 			m->m_off = 0;
-			m->m_free = 0;
+			m->m_type = MT_DATA;
+			mbstat.m_mtypes[MT_DATA]++;
+			mbstat.m_mbufs++;
 			(void) m_free(m);
 			m++;
 		}
-		mbstat.m_clusters += ncl;
 		break;
 	}
 	return ((caddr_t)m);
 }
 
-m_pgfree(addr, n)
-	caddr_t addr;
-	int n;
-{
-
-COUNT(M_PGFREE);
-}
-
-m_expand()
-{
-
-COUNT(M_EXPAND);
-	if (mbstat.m_bufs >= mbstat.m_hiwat)
-		return (0);
-	if (m_clalloc(1, MPG_MBUFS) == 0)
-		goto steal;
-	return (1);
-steal:
-	/* should ask protocols to free code */
-	return (0);
-}
-
-/* NEED SOME WAY TO RELEASE SPACE */
-
 /*
- * Space reservation routines
+ * Must be called at splimp.
  */
-m_reserve(mbufs)
-	int mbufs;
+m_expand(canwait)
+	int canwait;
 {
-
-	if (mbstat.m_lowat + (mbufs>>1) > (NMBCLUSTERS-32) * CLBYTES)
-		return (0);
-	mbstat.m_hiwat += mbufs;
-	mbstat.m_lowat = mbstat.m_hiwat >> 1;
-	return (1);
+	register struct domain *dp;
+	register struct protosw *pr;
+	int tries;
+
+	for (tries = 0;; ) {
+		if (m_clalloc(1, MPG_MBUFS, canwait))
+			return (1);
+		if (canwait == 0 || tries++)
+			return (0);
+
+		/* ask protocols to free space */
+		for (dp = domains; dp; dp = dp->dom_next)
+			for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW;
+			    pr++)
+				if (pr->pr_drain)
+					(*pr->pr_drain)();
+		mbstat.m_drain++;
+	}
 }
 
-m_release(mbufs)
-	int mbufs;
-{
-
-	mbstat.m_hiwat -= mbufs;
-	mbstat.m_lowat = mbstat.m_hiwat >> 1;
-}
+/* NEED SOME WAY TO RELEASE SPACE */
 
 /*
  * Space allocation routines.
@@ -127,27 +142,24 @@ m_release(mbufs)
  * for critical paths.
  */
 struct mbuf *
-m_get(canwait)
-	int canwait;
+m_get(canwait, type)
+	int canwait, type;
 {
 	register struct mbuf *m;
 
-COUNT(M_GET);
-	MGET(m, canwait);
+	MGET(m, canwait, type);
 	return (m);
 }
 
 struct mbuf *
-m_getclr(canwait)
-	int canwait;
+m_getclr(canwait, type)
+	int canwait, type;
 {
 	register struct mbuf *m;
 
-COUNT(M_GETCLR);
-	m = m_get(canwait);
+	MGET(m, canwait, type);
 	if (m == 0)
 		return (0);
-	m->m_off = MMINOFF;
 	bzero(mtod(m, caddr_t), MLEN);
 	return (m);
 }
@@ -158,25 +170,35 @@ m_free(m)
 {
 	register struct mbuf *n;
 
-COUNT(M_FREE);
 	MFREE(m, n);
 	return (n);
 }
 
+/*
+ * Get more mbufs; called from MGET macro if mfree list is empty.
+ * Must be called at splimp.
+ */
 /*ARGSUSED*/
 struct mbuf *
-m_more(type)
-	int type;
+m_more(canwait, type)
+	int canwait, type;
 {
 	register struct mbuf *m;
 
-COUNT(M_MORE);
-	if (!m_expand()) {
-		mbstat.m_drops++;
-		return (NULL);
+	while (m_expand(canwait) == 0) {
+		if (canwait == M_WAIT) {
+			mbstat.m_wait++;
+			m_want++;
+			sleep((caddr_t)&mfree, PZERO - 1);
+			if (mfree)
+				break;
+		} else {
+			mbstat.m_drops++;
+			return (NULL);
+		}
 	}
-#define m_more(x) (panic("m_more"), (struct mbuf *)0)
-	MGET(m, type);
+#define m_more(x,y) (panic("m_more"), (struct mbuf *)0)
+	MGET(m, canwait, type);
 #undef m_more
 	return (m);
 }
@@ -187,7 +209,6 @@ m_freem(m)
 	register struct mbuf *n;
 	register int s;
 
-COUNT(M_FREEM);
 	if (m == NULL)
 		return;
 	s = splimp();
@@ -200,6 +221,13 @@ COUNT(M_FREEM);
 /*
  * Mbuffer utility routines.
  */
+
+/*
+/*
+ * Make a copy of an mbuf chain starting "off" bytes from the beginning,
+ * continuing for "len" bytes. If len is M_COPYALL, copy to end of mbuf.
+ * Should get M_WAIT/M_DONTWAIT from caller.
+ */
 struct mbuf *
 m_copy(m, off, len)
 	register struct mbuf *m;
@@ -208,7 +236,6 @@ m_copy(m, off, len)
 {
 	register struct mbuf *n, **np;
 	struct mbuf *top, *p;
 
-COUNT(M_COPY);
 	if (len == 0)
 		return (0);
@@ -230,7 +257,7 @@ COUNT(M_COPY);
 				panic("m_copy");
 			break;
 		}
-		MGET(n, 1);
+		MGET(n, M_DONTWAIT, m->m_type);
 		*np = n;
 		if (n == 0)
 			goto nospace;
@@ -239,11 +266,9 @@ COUNT(M_COPY);
 			p = mtod(m, struct mbuf *);
 			n->m_off = ((int)p - (int)n) + off;
 			mclrefcnt[mtocl(p)]++;
-		} else {
-			n->m_off = MMINOFF;
+		} else
 			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
 			    (unsigned)n->m_len);
-		}
 		if (len != M_COPYALL)
 			len -= n->m_len;
 		off = 0;
@@ -256,6 +281,40 @@ nospace:
 	return (0);
 }
 
+/*
+ * Copy data from an mbuf chain starting "off" bytes from the beginning,
+ * continuing for "len" bytes, into the indicated buffer.
+ */
+m_copydata(m, off, len, cp)
+	register struct mbuf *m;
+	register int off;
+	register int len;
+	caddr_t cp;
+{
+	register unsigned count;
+
+	if (off < 0 || len < 0)
+		panic("m_copydata");
+	while (off > 0) {
+		if (m == 0)
+			panic("m_copydata");
+		if (off < m->m_len)
+			break;
+		off -= m->m_len;
+		m = m->m_next;
+	}
+	while (len > 0) {
+		if (m == 0)
+			panic("m_copydata");
+		count = MIN(m->m_len - off, len);
+		bcopy(mtod(m, caddr_t) + off, cp, count);
+		len -= count;
+		cp += count;
+		off = 0;
+		m = m->m_next;
+	}
+}
+
 m_cat(m, n)
 	register struct mbuf *m, *n;
 {
@@ -280,9 +339,9 @@ m_adj(mp, len)
 	struct mbuf *mp;
 	register int len;
 {
-	register struct mbuf *m, *n;
+	register struct mbuf *m;
+	register count;
 
-COUNT(M_ADJ);
 	if ((m = mp) == NULL)
 		return;
 	if (len >= 0) {
@@ -298,55 +357,86 @@ COUNT(M_ADJ);
 			}
 		}
 	} else {
-		/* a 2 pass algorithm might be better */
+		/*
+		 * Trim from tail. Scan the mbuf chain,
+		 * calculating its length and finding the last mbuf.
+		 * If the adjustment only affects this mbuf, then just
+		 * adjust and return. Otherwise, rescan and truncate
+		 * after the remaining size.
+		 */
 		len = -len;
-		while (len > 0 && m->m_len != 0) {
-			while (m != NULL && m->m_len != 0) {
-				n = m;
-				m = m->m_next;
-			}
-			if (n->m_len <= len) {
-				len -= n->m_len;
-				n->m_len = 0;
-				m = mp;
-			} else {
-				n->m_len -= len;
+		count = 0;
+		for (;;) {
+			count += m->m_len;
+			if (m->m_next == (struct mbuf *)0)
+				break;
+			m = m->m_next;
+		}
+		if (m->m_len >= len) {
+			m->m_len -= len;
+			return;
+		}
+		count -= len;
+		/*
+		 * Correct length for chain is "count".
+		 * Find the mbuf with last data, adjust its length,
+		 * and toss data from remaining mbufs on chain.
+		 */
+		for (m = mp; m; m = m->m_next) {
+			if (m->m_len >= count) {
+				m->m_len = count;
 				break;
 			}
+			count -= m->m_len;
 		}
+		while (m = m->m_next)
+			m->m_len = 0;
 	}
 }
 
+/*
+ * Rearange an mbuf chain so that len bytes are contiguous
+ * and in the data area of an mbuf (so that mtod and dtom
+ * will work for a structure of size len). Returns the resulting
+ * mbuf chain on success, frees it and returns null on failure.
+ * If there is room, it will add up to MPULL_EXTRA bytes to the
+ * contiguous region in an attempt to avoid being called next time.
+ */
 struct mbuf *
-m_pullup(m0, len)
-	struct mbuf *m0;
+m_pullup(n, len)
+	register struct mbuf *n;
 	int len;
 {
-	register struct mbuf *m, *n;
-	int cnt;
+	register struct mbuf *m;
+	register int count;
+	int space;
 
-	n = m0;
-	if (len > MLEN)
-		goto bad;
-	MGET(m, 0);
-	if (m == 0)
-		goto bad;
-	m->m_off = MMINOFF;
-	m->m_len = 0;
+	if (n->m_off + len <= MMAXOFF && n->m_next) {
+		m = n;
+		n = n->m_next;
+		len -= m->m_len;
+	} else {
+		if (len > MLEN)
+			goto bad;
+		MGET(m, M_DONTWAIT, n->m_type);
+		if (m == 0)
+			goto bad;
+		m->m_len = 0;
+	}
+	space = MMAXOFF - m->m_off;
 	do {
-		cnt = MIN(MLEN - m->m_len, len);
-		if (cnt > n->m_len)
-			cnt = n->m_len;
-		bcopy(mtod(n, caddr_t), mtod(m, caddr_t)+m->m_len, cnt);
-		len -= cnt;
-		m->m_len += cnt;
-		n->m_off += cnt;
-		n->m_len -= cnt;
+		count = MIN(MIN(space - m->m_len, len + MPULL_EXTRA), n->m_len);
+		bcopy(mtod(n, caddr_t), mtod(m, caddr_t)+m->m_len,
+		    (unsigned)count);
+		len -= count;
+		m->m_len += count;
+		n->m_len -= count;
 		if (n->m_len)
-			break;
-		n = m_free(n);
-	} while (n);
-	if (len) {
+			n->m_off += count;
+		else
+			n = m_free(n);
+	} while (len > 0 && n);
+	if (len > 0) {
 		(void) m_free(m);
 		goto bad;
 	}
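
The sketch below is an illustrative addition, not part of the blobdiff above. It shows how a caller would use the revised interface recorded in this change: MGET() now takes a wait flag (M_WAIT or M_DONTWAIT) together with an mbuf type, and m_pullup() makes a header contiguous before it is dereferenced. The routine names example_mkhdr and example_fixhdr are hypothetical, the abbreviated include list assumes a 4.3BSD-era kernel source tree, and hlen is assumed to fit in a single small mbuf (hlen <= MLEN).

#include "param.h"
#include "mbuf.h"

/*
 * Build a small mbuf holding "hlen" bytes of header data.
 * Assumes hlen <= MLEN; fails rather than sleeps when no mbufs are free.
 */
struct mbuf *
example_mkhdr(dat, hlen)
	caddr_t dat;
	int hlen;
{
	register struct mbuf *m;

	MGET(m, M_DONTWAIT, MT_HEADER);	/* wait flag + type, per this change */
	if (m == 0)
		return (0);
	m->m_len = hlen;
	bcopy(dat, mtod(m, caddr_t), (unsigned)hlen);
	return (m);
}

/*
 * Make sure the first "hlen" bytes of a chain are contiguous before
 * casting them to a header structure; m_pullup() frees the chain and
 * returns 0 on failure, as its comment above describes.
 */
struct mbuf *
example_fixhdr(m, hlen)
	register struct mbuf *m;
	int hlen;
{
	if (m->m_len < hlen && (m = m_pullup(m, hlen)) == 0)
		return (0);
	return (m);
}

m_getclr(canwait, type) follows the same calling convention as m_get() when zeroed storage is wanted, and the new m_copydata(m, off, len, cp) can be used to flatten part of a chain into a caller-supplied buffer.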