Commit | Line | Data |
---|---|---|
b688fc87 WJ |
1 | /* |
2 | * Copyright (c) 1982, 1986, 1988, 1990 Regents of the University of California. | |
3 | * All rights reserved. | |
4 | * | |
5 | * Redistribution and use in source and binary forms, with or without | |
6 | * modification, are permitted provided that the following conditions | |
7 | * are met: | |
8 | * 1. Redistributions of source code must retain the above copyright | |
9 | * notice, this list of conditions and the following disclaimer. | |
10 | * 2. Redistributions in binary form must reproduce the above copyright | |
11 | * notice, this list of conditions and the following disclaimer in the | |
12 | * documentation and/or other materials provided with the distribution. | |
13 | * 3. All advertising materials mentioning features or use of this software | |
14 | * must display the following acknowledgement: | |
15 | * This product includes software developed by the University of | |
16 | * California, Berkeley and its contributors. | |
17 | * 4. Neither the name of the University nor the names of its contributors | |
18 | * may be used to endorse or promote products derived from this software | |
19 | * without specific prior written permission. | |
20 | * | |
21 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND | |
22 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |
23 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | |
24 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | |
25 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | |
26 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | |
27 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | |
28 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | |
29 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | |
30 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | |
31 | * SUCH DAMAGE. | |
32 | * | |
33 | * @(#)uipc_socket2.c 7.17 (Berkeley) 5/4/91 | |
34 | */ | |
35 | ||
36 | #include "param.h" | |
37 | #include "systm.h" | |
38 | #include "proc.h" | |
39 | #include "file.h" | |
40 | #include "buf.h" | |
41 | #include "malloc.h" | |
42 | #include "mbuf.h" | |
43 | #include "protosw.h" | |
44 | #include "socket.h" | |
45 | #include "socketvar.h" | |
46 | ||
47 | /* | |
48 | * Primitive routines for operating on sockets and socket buffers | |
49 | */ | |
50 | ||
51 | /* strings for sleep message: */ | |
52 | char netio[] = "netio"; | |
53 | char netcon[] = "netcon"; | |
54 | char netcls[] = "netcls"; | |
55 | ||
56 | u_long sb_max = SB_MAX; /* patchable */ | |
57 | ||
58 | /* | |
59 | * Procedures to manipulate state flags of socket | |
60 | * and do appropriate wakeups. Normal sequence from the | |
61 | * active (originating) side is that soisconnecting() is | |
62 | * called during processing of connect() call, | |
63 | * resulting in an eventual call to soisconnected() if/when the | |
64 | * connection is established. When the connection is torn down | |
65 | * soisdisconnecting() is called during processing of disconnect() call, | |
66 | * and soisdisconnected() is called when the connection to the peer | |
67 | * is totally severed. The semantics of these routines are such that | |
68 | * connectionless protocols can call soisconnected() and soisdisconnected() | |
69 | * only, bypassing the in-progress calls when setting up a ``connection'' | |
70 | * takes no time. | |
71 | * | |
72 | * From the passive side, a socket is created with | |
73 | * two queues of sockets: so_q0 for connections in progress | |
74 | * and so_q for connections already made and awaiting user acceptance. | |
75 | * As a protocol is preparing incoming connections, it creates a socket | |
76 | * structure queued on so_q0 by calling sonewconn(). When the connection | |
77 | * is established, soisconnected() is called, and transfers the | |
78 | * socket structure to so_q, making it available to accept(). | |
79 | * | |
80 | * If a socket is closed with sockets on either | |
81 | * so_q0 or so_q, these sockets are dropped. | |
82 | * | |
83 | * If higher level protocols are implemented in | |
84 | * the kernel, the wakeups done here will sometimes | |
85 | * cause software-interrupt process scheduling. | |
86 | */ | |
87 | ||
88 | soisconnecting(so) | |
89 | register struct socket *so; | |
90 | { | |
91 | ||
92 | so->so_state &= ~(SS_ISCONNECTED|SS_ISDISCONNECTING); | |
93 | so->so_state |= SS_ISCONNECTING; | |
94 | } | |
95 | ||
/*
 * Mark a socket as connected.  If the socket is an embryonic connection
 * hanging off a listening socket's incomplete queue (so_q0), transfer it
 * to the completed queue (so_q) and notify the accept()ing process;
 * otherwise wake up anyone sleeping on this socket directly (e.g. a
 * process blocked in connect()).
 */
soisconnected(so)
	register struct socket *so;
{
	register struct socket *head = so->so_head;

	so->so_state &= ~(SS_ISCONNECTING|SS_ISDISCONNECTING|SS_ISCONFIRMING);
	so->so_state |= SS_ISCONNECTED;
	if (head && soqremque(so, 0)) {
		/* Incomplete connection now complete: move so_q0 -> so_q. */
		soqinsque(head, so, 1);
		/* Wake selectors/sleepers on the listening socket. */
		sorwakeup(head);
		wakeup((caddr_t)&head->so_timeo);
	} else {
		wakeup((caddr_t)&so->so_timeo);
		sorwakeup(so);
		sowwakeup(so);
	}
}
113 | ||
114 | soisdisconnecting(so) | |
115 | register struct socket *so; | |
116 | { | |
117 | ||
118 | so->so_state &= ~SS_ISCONNECTING; | |
119 | so->so_state |= (SS_ISDISCONNECTING|SS_CANTRCVMORE|SS_CANTSENDMORE); | |
120 | wakeup((caddr_t)&so->so_timeo); | |
121 | sowwakeup(so); | |
122 | sorwakeup(so); | |
123 | } | |
124 | ||
125 | soisdisconnected(so) | |
126 | register struct socket *so; | |
127 | { | |
128 | ||
129 | so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING); | |
130 | so->so_state |= (SS_CANTRCVMORE|SS_CANTSENDMORE); | |
131 | wakeup((caddr_t)&so->so_timeo); | |
132 | sowwakeup(so); | |
133 | sorwakeup(so); | |
134 | } | |
135 | ||
136 | /* | |
137 | * When an attempt at a new connection is noted on a socket | |
138 | * which accepts connections, sonewconn is called. If the | |
139 | * connection is possible (subject to space constraints, etc.) | |
 * then we allocate a new structure, properly linked into the
141 | * data structure of the original socket, and return this. | |
142 | * Connstatus may be 0, or SO_ISCONFIRMING, or SO_ISCONNECTED. | |
143 | * | |
144 | * Currently, sonewconn() is defined as sonewconn1() in socketvar.h | |
145 | * to catch calls that are missing the (new) second parameter. | |
146 | */ | |
struct socket *
sonewconn1(head, connstatus)
	register struct socket *head;
	int connstatus;
{
	register struct socket *so;
	/* Completed queue (so_q) if already connected, else so_q0. */
	int soqueue = connstatus ? 1 : 0;

	/*
	 * Refuse the connection if the backlog (completed plus in-progress
	 * connections) already exceeds 3/2 of the listen() limit.
	 */
	if (head->so_qlen + head->so_q0len > 3 * head->so_qlimit / 2)
		return ((struct socket *)0);
	/* M_DONTWAIT: called from protocol (possibly interrupt) context. */
	MALLOC(so, struct socket *, sizeof(*so), M_SOCKET, M_DONTWAIT);
	if (so == NULL)
		return ((struct socket *)0);
	bzero((caddr_t)so, sizeof(*so));
	/* Inherit type, options, limits and signal target from listener. */
	so->so_type = head->so_type;
	so->so_options = head->so_options &~ SO_ACCEPTCONN;
	so->so_linger = head->so_linger;
	so->so_state = head->so_state | SS_NOFDREF;
	so->so_proto = head->so_proto;
	so->so_timeo = head->so_timeo;
	so->so_pgid = head->so_pgid;
	(void) soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat);
	soqinsque(head, so, soqueue);
	/* Give the protocol a chance to attach; undo everything on error. */
	if ((*so->so_proto->pr_usrreq)(so, PRU_ATTACH,
	    (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0)) {
		(void) soqremque(so, soqueue);
		(void) free((caddr_t)so, M_SOCKET);
		return ((struct socket *)0);
	}
	if (connstatus) {
		/* Already connected/confirming: notify the accept()er. */
		sorwakeup(head);
		wakeup((caddr_t)&head->so_timeo);
		so->so_state |= connstatus;
	}
	return (so);
}
183 | ||
/*
 * Insert socket so at the tail of one of listening socket head's
 * connection queues: q == 0 selects the incomplete queue (so_q0),
 * nonzero the completed queue (so_q).  The queues are singly linked
 * through the same so_q0/so_q fields of the queued sockets.
 */
soqinsque(head, so, q)
	register struct socket *head, *so;
	int q;
{

	register struct socket **prev;
	so->so_head = head;
	if (q == 0) {
		head->so_q0len++;
		so->so_q0 = 0;
		/* Walk to the tail pointer of the so_q0 list. */
		for (prev = &(head->so_q0); *prev; )
			prev = &((*prev)->so_q0);
	} else {
		head->so_qlen++;
		so->so_q = 0;
		/* Walk to the tail pointer of the so_q list. */
		for (prev = &(head->so_q); *prev; )
			prev = &((*prev)->so_q);
	}
	*prev = so;
}
204 | ||
/*
 * Remove socket so from the q'th connection queue of its listening
 * socket (q == 0: so_q0, else so_q).  Returns 1 on success, 0 if the
 * socket is not on that queue.  Note that prev starts at head itself:
 * head's own so_q0/so_q fields serve as the list heads, so the same
 * field access works for the head and for queued sockets.
 */
soqremque(so, q)
	register struct socket *so;
	int q;
{
	register struct socket *head, *prev, *next;

	head = so->so_head;
	prev = head;
	for (;;) {
		next = q ? prev->so_q : prev->so_q0;
		if (next == so)
			break;
		if (next == 0)
			return (0);	/* not found on this queue */
		prev = next;
	}
	/* Unlink and adjust the matching queue length. */
	if (q == 0) {
		prev->so_q0 = next->so_q0;
		head->so_q0len--;
	} else {
		prev->so_q = next->so_q;
		head->so_qlen--;
	}
	next->so_q0 = next->so_q = 0;
	next->so_head = 0;
	return (1);
}
232 | ||
233 | /* | |
234 | * Socantsendmore indicates that no more data will be sent on the | |
235 | * socket; it would normally be applied to a socket when the user | |
236 | * informs the system that no more data is to be sent, by the protocol | |
237 | * code (in case PRU_SHUTDOWN). Socantrcvmore indicates that no more data | |
238 | * will be received, and will normally be applied to the socket by a | |
239 | * protocol when it detects that the peer will send no more data. | |
240 | * Data queued for reading in the socket may yet be read. | |
241 | */ | |
242 | ||
/*
 * Note that no more data will be sent on this socket and wake any
 * process sleeping on (or selecting for) send-buffer space.
 */
socantsendmore(so)
	struct socket *so;
{

	so->so_state |= SS_CANTSENDMORE;
	sowwakeup(so);
}
250 | ||
/*
 * Note that no more data will be received on this socket and wake any
 * process sleeping on (or selecting for) incoming data; data already
 * queued may still be read.
 */
socantrcvmore(so)
	struct socket *so;
{

	so->so_state |= SS_CANTRCVMORE;
	sorwakeup(so);
}
258 | ||
259 | /* | |
260 | * Socket select/wakeup routines. | |
261 | */ | |
262 | ||
263 | /* | |
264 | * Queue a process for a select on a socket buffer. | |
265 | */ | |
/*
 * Queue process cp for a select on socket buffer sb.  Only one process
 * can be recorded per buffer (sb_sel); if another process is already
 * sleeping in select on this buffer, just record the collision
 * (SB_COLL) so that the eventual wakeup wakes all selectors.
 */
sbselqueue(sb, cp)
	struct sockbuf *sb;
	struct proc *cp;
{
	struct proc *p;

	if ((p = sb->sb_sel) && p->p_wchan == (caddr_t)&selwait)
		sb->sb_flags |= SB_COLL;
	else {
		sb->sb_sel = cp;
		sb->sb_flags |= SB_SEL;
	}
}
279 | ||
280 | /* | |
281 | * Wait for data to arrive at/drain from a socket buffer. | |
282 | */ | |
/*
 * Sleep until data arrives at (or drains from) socket buffer sb.
 * Returns whatever tsleep() returns (0, or an error such as EINTR).
 * The sleep is interruptible by signals (PCATCH) unless SB_NOINTR
 * is set; sb_timeo bounds the sleep (0 means no timeout).
 */
sbwait(sb)
	struct sockbuf *sb;
{

	sb->sb_flags |= SB_WAIT;
	return (tsleep((caddr_t)&sb->sb_cc,
	    (sb->sb_flags & SB_NOINTR) ? PSOCK : PSOCK | PCATCH, netio,
	    sb->sb_timeo));
}
292 | ||
293 | /* | |
294 | * Lock a sockbuf already known to be locked; | |
295 | * return any error returned from sleep (EINTR). | |
296 | */ | |
/*
 * Acquire the sockbuf sleep lock, waiting (and re-checking) while
 * another holder has SB_LOCK set.  Returns 0 on success, or the error
 * from tsleep() (e.g. EINTR) if the sleep is interrupted; in that case
 * the lock has NOT been acquired.
 */
sb_lock(sb)
	register struct sockbuf *sb;
{
	int error;

	while (sb->sb_flags & SB_LOCK) {
		/* SB_WANT tells the unlocker someone is waiting. */
		sb->sb_flags |= SB_WANT;
		if (error = tsleep((caddr_t)&sb->sb_flags,
		    (sb->sb_flags & SB_NOINTR) ? PSOCK : PSOCK|PCATCH,
		    netio, 0))
			return (error);
	}
	sb->sb_flags |= SB_LOCK;
	return (0);
}
312 | ||
313 | /* | |
314 | * Wakeup processes waiting on a socket buffer. | |
315 | * Do asynchronous notification via SIGIO | |
316 | * if the socket has the SS_ASYNC flag set. | |
317 | */ | |
/*
 * Wake up everyone interested in socket buffer sb of socket so:
 * selecting processes (including select collisions), processes sleeping
 * in sbwait(), and — if SS_ASYNC is set — deliver SIGIO to the owning
 * process (so_pgid > 0) or process group (so_pgid < 0).
 */
sowakeup(so, sb)
	register struct socket *so;
	register struct sockbuf *sb;
{
	struct proc *p;

	if (sb->sb_sel) {
		selwakeup(sb->sb_sel, sb->sb_flags & SB_COLL);
		sb->sb_sel = 0;
		sb->sb_flags &= ~(SB_SEL|SB_COLL);
	}
	if (sb->sb_flags & SB_WAIT) {
		sb->sb_flags &= ~SB_WAIT;
		/* Sleepers in sbwait() wait on &sb->sb_cc. */
		wakeup((caddr_t)&sb->sb_cc);
	}
	if (so->so_state & SS_ASYNC) {
		/* Negative so_pgid names a process group, positive a pid. */
		if (so->so_pgid < 0)
			gsignal(-so->so_pgid, SIGIO);
		else if (so->so_pgid > 0 && (p = pfind(so->so_pgid)) != 0)
			psignal(p, SIGIO);
	}
}
340 | ||
341 | /* | |
342 | * Socket buffer (struct sockbuf) utility routines. | |
343 | * | |
344 | * Each socket contains two socket buffers: one for sending data and | |
345 | * one for receiving data. Each buffer contains a queue of mbufs, | |
346 | * information about the number of mbufs and amount of data in the | |
347 | * queue, and other fields allowing select() statements and notification | |
348 | * on data availability to be implemented. | |
349 | * | |
350 | * Data stored in a socket buffer is maintained as a list of records. | |
351 | * Each record is a list of mbufs chained together with the m_next | |
352 | * field. Records are chained together with the m_nextpkt field. The upper | |
353 | * level routine soreceive() expects the following conventions to be | |
354 | * observed when placing information in the receive buffer: | |
355 | * | |
356 | * 1. If the protocol requires each message be preceded by the sender's | |
357 | * name, then a record containing that name must be present before | |
358 | * any associated data (mbuf's must be of type MT_SONAME). | |
359 | * 2. If the protocol supports the exchange of ``access rights'' (really | |
360 | * just additional data associated with the message), and there are | |
361 | * ``rights'' to be received, then a record containing this data | |
362 | * should be present (mbuf's must be of type MT_RIGHTS). | |
363 | * 3. If a name or rights record exists, then it must be followed by | |
364 | * a data record, perhaps of zero length. | |
365 | * | |
366 | * Before using a new socket structure it is first necessary to reserve | |
367 | * buffer space to the socket, by calling sbreserve(). This should commit | |
368 | * some of the available buffer space in the system buffer pool for the | |
369 | * socket (currently, it does nothing but enforce limits). The space | |
370 | * should be released by calling sbrelease() when the socket is destroyed. | |
371 | */ | |
372 | ||
/*
 * Reserve send and receive buffer space for a socket and establish
 * default low-water marks.  Returns 0 on success or ENOBUFS if either
 * reservation exceeds the allowed limit; on failure nothing remains
 * reserved.
 */
soreserve(so, sndcc, rcvcc)
	register struct socket *so;
	u_long sndcc, rcvcc;
{

	if (sbreserve(&so->so_snd, sndcc) == 0)
		goto bad;
	if (sbreserve(&so->so_rcv, rcvcc) == 0)
		goto bad2;
	/* Default low-water marks: 1 byte to read, one cluster to write. */
	if (so->so_rcv.sb_lowat == 0)
		so->so_rcv.sb_lowat = 1;
	if (so->so_snd.sb_lowat == 0)
		so->so_snd.sb_lowat = MCLBYTES;
	if (so->so_snd.sb_lowat > so->so_snd.sb_hiwat)
		so->so_snd.sb_lowat = so->so_snd.sb_hiwat;
	return (0);
bad2:
	sbrelease(&so->so_snd);
bad:
	return (ENOBUFS);
}
394 | ||
395 | /* | |
396 | * Allot mbufs to a sockbuf. | |
397 | * Attempt to scale mbmax so that mbcnt doesn't become limiting | |
398 | * if buffering efficiency is near the normal case. | |
399 | */ | |
/*
 * Allot cc bytes of buffering to sockbuf sb.  Returns 1 on success,
 * 0 if cc exceeds the system limit.  The mbuf-space ceiling (sb_mbmax)
 * is scaled to 2*cc (capped at sb_max) so that mbuf overhead does not
 * become limiting when buffering efficiency is near the normal case.
 */
sbreserve(sb, cc)
	struct sockbuf *sb;
	u_long cc;
{

	/* Limit is sb_max discounted by the worst-case mbuf overhead. */
	if (cc > sb_max * MCLBYTES / (MSIZE + MCLBYTES))
		return (0);
	sb->sb_hiwat = cc;
	sb->sb_mbmax = min(cc * 2, sb_max);
	if (sb->sb_lowat > sb->sb_hiwat)
		sb->sb_lowat = sb->sb_hiwat;
	return (1);
}
413 | ||
414 | /* | |
415 | * Free mbufs held by a socket, and reserved mbuf space. | |
416 | */ | |
417 | sbrelease(sb) | |
418 | struct sockbuf *sb; | |
419 | { | |
420 | ||
421 | sbflush(sb); | |
422 | sb->sb_hiwat = sb->sb_mbmax = 0; | |
423 | } | |
424 | ||
425 | /* | |
426 | * Routines to add and remove | |
427 | * data from an mbuf queue. | |
428 | * | |
429 | * The routines sbappend() or sbappendrecord() are normally called to | |
430 | * append new mbufs to a socket buffer, after checking that adequate | |
431 | * space is available, comparing the function sbspace() with the amount | |
432 | * of data to be added. sbappendrecord() differs from sbappend() in | |
433 | * that data supplied is treated as the beginning of a new record. | |
434 | * To place a sender's address, optional access rights, and data in a | |
435 | * socket receive buffer, sbappendaddr() should be used. To place | |
436 | * access rights and data in a socket receive buffer, sbappendrights() | |
437 | * should be used. In either case, the new data begins a new record. | |
438 | * Note that unlike sbappend() and sbappendrecord(), these routines check | |
439 | * for the caller that there will be enough space to store the data. | |
440 | * Each fails if there is not enough space, or if it cannot find mbufs | |
441 | * to store additional information in. | |
442 | * | |
443 | * Reliable protocols may use the socket send buffer to hold data | |
444 | * awaiting acknowledgement. Data is normally copied from a socket | |
445 | * send buffer in a protocol with m_copy for output to a peer, | |
446 | * and then removing the data from the socket buffer with sbdrop() | |
447 | * or sbdroprecord() when the data is acknowledged by the peer. | |
448 | */ | |
449 | ||
450 | /* | |
451 | * Append mbuf chain m to the last record in the | |
452 | * socket buffer sb. The additional space associated | |
453 | * the mbuf chain is recorded in sb. Empty mbufs are | |
454 | * discarded and mbufs are compacted where possible. | |
455 | */ | |
/*
 * Append mbuf chain m to the last record in socket buffer sb,
 * compacting and discarding empty mbufs where possible.  If the last
 * record is already terminated (an mbuf carries M_EOR), the data must
 * start a new record instead, so fall through to sbappendrecord().
 */
sbappend(sb, m)
	struct sockbuf *sb;
	struct mbuf *m;
{
	register struct mbuf *n;

	if (m == 0)
		return;
	if (n = sb->sb_mb) {
		/* Find the last record... */
		while (n->m_nextpkt)
			n = n->m_nextpkt;
		/* ...and its last mbuf, watching for an end-of-record mark. */
		do {
			if (n->m_flags & M_EOR) {
				sbappendrecord(sb, m); /* XXXXXX!!!! */
				return;
			}
		} while (n->m_next && (n = n->m_next));
	}
	/* n is the tail mbuf (or 0 if the buffer was empty). */
	sbcompress(sb, m, n);
}
476 | ||
#ifdef SOCKBUF_DEBUG
/*
 * Debug-only consistency check: recompute the byte count and mbuf
 * space of sb from its chain and panic if they disagree with the
 * cached sb_cc/sb_mbcnt, or if a record boundary (m_nextpkt) is found
 * where none is expected.
 */
sbcheck(sb)
	register struct sockbuf *sb;
{
	register struct mbuf *m;
	register int len = 0, mbcnt = 0;

	for (m = sb->sb_mb; m; m = m->m_next) {
		len += m->m_len;
		mbcnt += MSIZE;
		if (m->m_flags & M_EXT)
			mbcnt += m->m_ext.ext_size;
		if (m->m_nextpkt)
			panic("sbcheck nextpkt");
	}
	if (len != sb->sb_cc || mbcnt != sb->sb_mbcnt) {
		printf("cc %d != %d || mbcnt %d != %d\n", len, sb->sb_cc,
		    mbcnt, sb->sb_mbcnt);
		panic("sbcheck");
	}
}
#endif
499 | ||
500 | /* | |
501 | * As above, except the mbuf chain | |
502 | * begins a new record. | |
503 | */ | |
/*
 * As sbappend(), except that the mbuf chain m0 begins a new record:
 * m0 is linked onto the record chain (m_nextpkt) and the rest of the
 * chain is compressed in behind it.
 */
sbappendrecord(sb, m0)
	register struct sockbuf *sb;
	register struct mbuf *m0;
{
	register struct mbuf *m;

	if (m0 == 0)
		return;
	/* Find the last existing record, if any. */
	if (m = sb->sb_mb)
		while (m->m_nextpkt)
			m = m->m_nextpkt;
	/*
	 * Put the first mbuf on the queue.
	 * Note this permits zero length records.
	 */
	sballoc(sb, m0);
	if (m)
		m->m_nextpkt = m0;
	else
		sb->sb_mb = m0;
	m = m0->m_next;
	m0->m_next = 0;
	/* M_EOR belongs on the last mbuf; move it off m0 if more follow. */
	if (m && (m0->m_flags & M_EOR)) {
		m0->m_flags &= ~M_EOR;
		m->m_flags |= M_EOR;
	}
	sbcompress(sb, m, m0);
}
532 | ||
533 | /* | |
534 | * As above except that OOB data | |
535 | * is inserted at the beginning of the sockbuf, | |
536 | * but after any other OOB data. | |
537 | */ | |
/*
 * As sbappendrecord(), except that the record m0 carries out-of-band
 * data and is inserted at the beginning of the sockbuf — but after any
 * existing OOB-data records (and any control mbufs leading a record
 * are inspected rather than skipped wholesale).
 */
sbinsertoob(sb, m0)
	register struct sockbuf *sb;
	register struct mbuf *m0;
{
	register struct mbuf *m;
	register struct mbuf **mp;

	if (m0 == 0)
		return;
	/* Find the insertion point: first record that is not OOB data. */
	for (mp = &sb->sb_mb; m = *mp; mp = &((*mp)->m_nextpkt)) {
	    again:
		switch (m->m_type) {

		case MT_OOBDATA:
			continue;	/* WANT next train */

		case MT_CONTROL:
			if (m = m->m_next)
				goto again;	/* inspect THIS train further */
		}
		break;
	}
	/*
	 * Put the first mbuf on the queue.
	 * Note this permits zero length records.
	 */
	sballoc(sb, m0);
	m0->m_nextpkt = *mp;
	*mp = m0;
	m = m0->m_next;
	m0->m_next = 0;
	/* M_EOR belongs on the last mbuf; move it off m0 if more follow. */
	if (m && (m0->m_flags & M_EOR)) {
		m0->m_flags &= ~M_EOR;
		m->m_flags |= M_EOR;
	}
	sbcompress(sb, m, m0);
}
575 | ||
576 | /* | |
577 | * Append address and data, and optionally, control (ancillary) data | |
578 | * to the receive queue of a socket. If present, | |
579 | * m0 must include a packet header with total length. | |
580 | * Returns 0 if no space in sockbuf or insufficient mbufs. | |
581 | */ | |
/*
 * Append a new record consisting of the sender's address asa, optional
 * control (ancillary) data, and optional data m0 to the receive queue
 * of a socket.  If present, m0 must include a packet header with the
 * total length.  Returns 1 on success, 0 if there is no space in the
 * sockbuf or an mbuf for the address cannot be obtained.
 */
sbappendaddr(sb, asa, m0, control)
	register struct sockbuf *sb;
	struct sockaddr *asa;
	struct mbuf *m0, *control;
{
	register struct mbuf *m, *n;
	int space = asa->sa_len;

	if (m0 && (m0->m_flags & M_PKTHDR) == 0)
		panic("sbappendaddr");
	if (m0)
		space += m0->m_pkthdr.len;
	for (n = control; n; n = n->m_next) {
		space += n->m_len;
		if (n->m_next == 0)	/* keep pointer to last control buf */
			break;
	}
	if (space > sbspace(sb))
		return (0);
	/* Address must fit in a single small mbuf. */
	if (asa->sa_len > MLEN)
		return (0);
	MGET(m, M_DONTWAIT, MT_SONAME);
	if (m == 0)
		return (0);
	m->m_len = asa->sa_len;
	bcopy((caddr_t)asa, mtod(m, caddr_t), asa->sa_len);
	if (n)
		n->m_next = m0;		/* concatenate data to control */
	else
		control = m0;
	m->m_next = control;
	/* Record layout is now: name -> control -> data. */
	for (n = m; n; n = n->m_next)
		sballoc(sb, n);
	if (n = sb->sb_mb) {
		while (n->m_nextpkt)
			n = n->m_nextpkt;
		n->m_nextpkt = m;
	} else
		sb->sb_mb = m;
	return (1);
}
623 | ||
/*
 * Append a new record consisting of control (ancillary) data and
 * optional data m0 to a socket's receive queue.  control must be
 * non-null (panic otherwise).  Returns 1 on success, 0 if there is
 * not enough space in the sockbuf.
 */
sbappendcontrol(sb, m0, control)
	struct sockbuf *sb;
	struct mbuf *control, *m0;
{
	register struct mbuf *m, *n;
	int space = 0;

	if (control == 0)
		panic("sbappendcontrol");
	for (m = control; ; m = m->m_next) {
		space += m->m_len;
		if (m->m_next == 0)
			break;
	}
	n = m;			/* save pointer to last control buffer */
	for (m = m0; m; m = m->m_next)
		space += m->m_len;
	if (space > sbspace(sb))
		return (0);
	n->m_next = m0;			/* concatenate data to control */
	/* Charge the whole record (control + data) to the sockbuf. */
	for (m = control; m; m = m->m_next)
		sballoc(sb, m);
	if (n = sb->sb_mb) {
		while (n->m_nextpkt)
			n = n->m_nextpkt;
		n->m_nextpkt = control;
	} else
		sb->sb_mb = control;
	return (1);
}
654 | ||
655 | /* | |
656 | * Compress mbuf chain m into the socket | |
657 | * buffer sb following mbuf n. If n | |
658 | * is null, the buffer is presumed empty. | |
659 | */ | |
/*
 * Compress mbuf chain m into socket buffer sb following mbuf n (the
 * current tail of the last record); if n is null the buffer is
 * presumed empty.  Empty mbufs are freed (unless they alone carry an
 * end-of-record mark), small mbufs are merged into the tail where
 * they fit, and any M_EOR seen is re-applied to the final tail mbuf.
 */
sbcompress(sb, m, n)
	register struct sockbuf *sb;
	register struct mbuf *m, *n;
{
	register int eor = 0;
	register struct mbuf *o;

	while (m) {
		/* Remember any end-of-record mark we consume along the way. */
		eor |= m->m_flags & M_EOR;
		/*
		 * Free an empty mbuf, but only if the EOR mark (if any)
		 * has somewhere of the same type to land afterwards.
		 */
		if (m->m_len == 0 &&
		    (eor == 0 ||
		    (((o = m->m_next) || (o = n)) &&
		    o->m_type == m->m_type))) {
			m = m_free(m);
			continue;
		}
		/* Copy small data into the tail mbuf when it fits. */
		if (n && (n->m_flags & (M_EXT | M_EOR)) == 0 &&
		    (n->m_data + n->m_len + m->m_len) < &n->m_dat[MLEN] &&
		    n->m_type == m->m_type) {
			bcopy(mtod(m, caddr_t), mtod(n, caddr_t) + n->m_len,
			    (unsigned)m->m_len);
			n->m_len += m->m_len;
			sb->sb_cc += m->m_len;
			m = m_free(m);
			continue;
		}
		/* Otherwise link the mbuf in as the new tail. */
		if (n)
			n->m_next = m;
		else
			sb->sb_mb = m;
		sballoc(sb, m);
		n = m;
		m->m_flags &= ~M_EOR;
		m = m->m_next;
		n->m_next = 0;
	}
	/* Restore the end-of-record mark on the final tail mbuf. */
	if (eor) {
		if (n)
			n->m_flags |= eor;
		else
			printf("semi-panic: sbcompress\n");
	}
}
703 | ||
704 | /* | |
705 | * Free all mbufs in a sockbuf. | |
706 | * Check that all resources are reclaimed. | |
707 | */ | |
/*
 * Free all mbufs in sockbuf sb and verify that every cached resource
 * count is reclaimed.  The buffer must not be locked (panic), and any
 * residual byte count or chain after draining is a fatal inconsistency.
 */
sbflush(sb)
	register struct sockbuf *sb;
{

	if (sb->sb_flags & SB_LOCK)
		panic("sbflush");
	/* Drop whole records until no mbuf space remains accounted. */
	while (sb->sb_mbcnt)
		sbdrop(sb, (int)sb->sb_cc);
	if (sb->sb_cc || sb->sb_mb)
		panic("sbflush 2");
}
719 | ||
720 | /* | |
721 | * Drop data from (the front of) a sockbuf. | |
722 | */ | |
/*
 * Drop len bytes of data from the front of sockbuf sb, crossing record
 * boundaries as needed (panic if len exceeds the data present).
 * Trailing empty mbufs are trimmed, and the record chain (m_nextpkt)
 * is restitched onto whatever survives at the front.
 */
sbdrop(sb, len)
	register struct sockbuf *sb;
	register int len;
{
	register struct mbuf *m, *mn;
	struct mbuf *next;

	next = (m = sb->sb_mb) ? m->m_nextpkt : 0;
	while (len > 0) {
		if (m == 0) {
			/* Current record exhausted; advance to the next. */
			if (next == 0)
				panic("sbdrop");
			m = next;
			next = m->m_nextpkt;
			continue;
		}
		if (m->m_len > len) {
			/* Partial mbuf: trim from the front and stop. */
			m->m_len -= len;
			m->m_data += len;
			sb->sb_cc -= len;
			break;
		}
		len -= m->m_len;
		sbfree(sb, m);
		MFREE(m, mn);
		m = mn;
	}
	/* Discard any now-empty mbufs left at the front. */
	while (m && m->m_len == 0) {
		sbfree(sb, m);
		MFREE(m, mn);
		m = mn;
	}
	/* Reattach the remaining records behind what is left. */
	if (m) {
		sb->sb_mb = m;
		m->m_nextpkt = next;
	} else
		sb->sb_mb = next;
}
761 | ||
762 | /* | |
763 | * Drop a record off the front of a sockbuf | |
764 | * and move the next record to the front. | |
765 | */ | |
766 | sbdroprecord(sb) | |
767 | register struct sockbuf *sb; | |
768 | { | |
769 | register struct mbuf *m, *mn; | |
770 | ||
771 | m = sb->sb_mb; | |
772 | if (m) { | |
773 | sb->sb_mb = m->m_nextpkt; | |
774 | do { | |
775 | sbfree(sb, m); | |
776 | MFREE(m, mn); | |
777 | } while (m = mn); | |
778 | } | |
779 | } |