Commit | Line | Data |
---|---|---|
15637ed4 RG |
1 | /* |
2 | * Copyright (c) 1982, 1986, 1988, 1990 Regents of the University of California. | |
3 | * All rights reserved. | |
4 | * | |
5 | * Redistribution and use in source and binary forms, with or without | |
6 | * modification, are permitted provided that the following conditions | |
7 | * are met: | |
8 | * 1. Redistributions of source code must retain the above copyright | |
9 | * notice, this list of conditions and the following disclaimer. | |
10 | * 2. Redistributions in binary form must reproduce the above copyright | |
11 | * notice, this list of conditions and the following disclaimer in the | |
12 | * documentation and/or other materials provided with the distribution. | |
13 | * 3. All advertising materials mentioning features or use of this software | |
14 | * must display the following acknowledgement: | |
15 | * This product includes software developed by the University of | |
16 | * California, Berkeley and its contributors. | |
17 | * 4. Neither the name of the University nor the names of its contributors | |
18 | * may be used to endorse or promote products derived from this software | |
19 | * without specific prior written permission. | |
20 | * | |
21 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND | |
22 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |
23 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | |
24 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | |
25 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | |
26 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | |
27 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | |
28 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | |
29 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | |
30 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | |
31 | * SUCH DAMAGE. | |
32 | * | |
78ed81a3 | 33 | * from: @(#)uipc_socket2.c 7.17 (Berkeley) 5/4/91 |
34 | * $Id$ | |
15637ed4 RG |
35 | */ |
36 | ||
37 | #include "param.h" | |
38 | #include "systm.h" | |
39 | #include "proc.h" | |
40 | #include "file.h" | |
41 | #include "buf.h" | |
42 | #include "malloc.h" | |
43 | #include "mbuf.h" | |
44 | #include "protosw.h" | |
45 | #include "socket.h" | |
46 | #include "socketvar.h" | |
47 | ||
48 | /* | |
49 | * Primitive routines for operating on sockets and socket buffers | |
50 | */ | |
51 | ||
52 | /* strings for sleep message: */ | |
char netio[] = "netio";		/* sleep message: socket buffer I/O (sbwait, sb_lock) */
char netcon[] = "netcon";	/* sleep message: connections — not used in this chunk; presumably connect/accept waits */
char netcls[] = "netcls";	/* sleep message: close — not used in this chunk; presumably linger on close */

u_long sb_max = SB_MAX;		/* patchable global cap on socket buffer space */
58 | ||
59 | /* | |
60 | * Procedures to manipulate state flags of socket | |
61 | * and do appropriate wakeups. Normal sequence from the | |
62 | * active (originating) side is that soisconnecting() is | |
63 | * called during processing of connect() call, | |
64 | * resulting in an eventual call to soisconnected() if/when the | |
65 | * connection is established. When the connection is torn down | |
66 | * soisdisconnecting() is called during processing of disconnect() call, | |
67 | * and soisdisconnected() is called when the connection to the peer | |
68 | * is totally severed. The semantics of these routines are such that | |
69 | * connectionless protocols can call soisconnected() and soisdisconnected() | |
70 | * only, bypassing the in-progress calls when setting up a ``connection'' | |
71 | * takes no time. | |
72 | * | |
73 | * From the passive side, a socket is created with | |
74 | * two queues of sockets: so_q0 for connections in progress | |
75 | * and so_q for connections already made and awaiting user acceptance. | |
76 | * As a protocol is preparing incoming connections, it creates a socket | |
77 | * structure queued on so_q0 by calling sonewconn(). When the connection | |
78 | * is established, soisconnected() is called, and transfers the | |
79 | * socket structure to so_q, making it available to accept(). | |
80 | * | |
81 | * If a socket is closed with sockets on either | |
82 | * so_q0 or so_q, these sockets are dropped. | |
83 | * | |
84 | * If higher level protocols are implemented in | |
85 | * the kernel, the wakeups done here will sometimes | |
86 | * cause software-interrupt process scheduling. | |
87 | */ | |
88 | ||
89 | soisconnecting(so) | |
90 | register struct socket *so; | |
91 | { | |
92 | ||
93 | so->so_state &= ~(SS_ISCONNECTED|SS_ISDISCONNECTING); | |
94 | so->so_state |= SS_ISCONNECTING; | |
95 | } | |
96 | ||
/*
 * Mark a socket as connected.  If it was spawned by a listening socket
 * (so_head != 0) and is still on the incomplete queue (so_q0), move it
 * to the completed queue (so_q) and notify parties blocked on the head
 * socket (e.g. in accept()).  Otherwise wake up everyone sleeping on
 * this socket itself.
 */
soisconnected(so)
	register struct socket *so;
{
	register struct socket *head = so->so_head;

	/* Leave only the connected bit among the connection-state flags. */
	so->so_state &= ~(SS_ISCONNECTING|SS_ISDISCONNECTING|SS_ISCONFIRMING);
	so->so_state |= SS_ISCONNECTED;
	if (head && soqremque(so, 0)) {
		/* Incoming connection: make it available to accept(). */
		soqinsque(head, so, 1);
		sorwakeup(head);
		wakeup((caddr_t)&head->so_timeo);
	} else {
		/* Active connect (or not queued): wake this socket's sleepers. */
		wakeup((caddr_t)&so->so_timeo);
		sorwakeup(so);
		sowwakeup(so);
	}
}
114 | ||
115 | soisdisconnecting(so) | |
116 | register struct socket *so; | |
117 | { | |
118 | ||
119 | so->so_state &= ~SS_ISCONNECTING; | |
120 | so->so_state |= (SS_ISDISCONNECTING|SS_CANTRCVMORE|SS_CANTSENDMORE); | |
121 | wakeup((caddr_t)&so->so_timeo); | |
122 | sowwakeup(so); | |
123 | sorwakeup(so); | |
124 | } | |
125 | ||
126 | soisdisconnected(so) | |
127 | register struct socket *so; | |
128 | { | |
129 | ||
130 | so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING); | |
131 | so->so_state |= (SS_CANTRCVMORE|SS_CANTSENDMORE); | |
132 | wakeup((caddr_t)&so->so_timeo); | |
133 | sowwakeup(so); | |
134 | sorwakeup(so); | |
135 | } | |
136 | ||
137 | /* | |
138 | * When an attempt at a new connection is noted on a socket | |
139 | * which accepts connections, sonewconn is called. If the | |
140 | * connection is possible (subject to space constraints, etc.) | |
 * then we allocate a new structure, properly linked into the
 * data structure of the original socket, and return this.
 * Connstatus may be 0, or SS_ISCONFIRMING, or SS_ISCONNECTED.
144 | * | |
145 | * Currently, sonewconn() is defined as sonewconn1() in socketvar.h | |
146 | * to catch calls that are missing the (new) second parameter. | |
147 | */ | |
struct socket *
sonewconn1(head, connstatus)
	register struct socket *head;
	int connstatus;
{
	register struct socket *so;
	int soqueue = connstatus ? 1 : 0;	/* 1: completed queue, 0: incomplete (so_q0) */

	/*
	 * Limit pending + completed connections to 1.5 times the
	 * listen backlog (so_qlimit).
	 */
	if (head->so_qlen + head->so_q0len > 3 * head->so_qlimit / 2)
		return ((struct socket *)0);
	MALLOC(so, struct socket *, sizeof(*so), M_SOCKET, M_DONTWAIT);
	if (so == NULL)
		return ((struct socket *)0);
	bzero((caddr_t)so, sizeof(*so));
	/* Inherit the listening socket's type, options and settings. */
	so->so_type = head->so_type;
	so->so_options = head->so_options &~ SO_ACCEPTCONN;
	so->so_linger = head->so_linger;
	so->so_state = head->so_state | SS_NOFDREF;
	so->so_proto = head->so_proto;
	so->so_timeo = head->so_timeo;
	so->so_pgid = head->so_pgid;
	(void) soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat);
	soqinsque(head, so, soqueue);
	/* Let the protocol attach its state; undo everything on failure. */
	if ((*so->so_proto->pr_usrreq)(so, PRU_ATTACH,
	    (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0)) {
		(void) soqremque(so, soqueue);
		(void) free((caddr_t)so, M_SOCKET);
		return ((struct socket *)0);
	}
	if (connstatus) {
		/* Already connected/confirming: notify accept()ers on head. */
		sorwakeup(head);
		wakeup((caddr_t)&head->so_timeo);
		so->so_state |= connstatus;
	}
	return (so);
}
184 | ||
/*
 * Insert socket so at the tail of one of head's connection queues:
 * q == 0 selects the incomplete queue (so_q0), anything else the
 * completed queue (so_q).  The matching queue count is incremented.
 */
soqinsque(head, so, q)
	register struct socket *head, *so;
	int q;
{

	register struct socket **prev;
	so->so_head = head;
	if (q == 0) {
		head->so_q0len++;
		so->so_q0 = 0;
		/* Walk to the tail of the so_q0 list. */
		for (prev = &(head->so_q0); *prev; )
			prev = &((*prev)->so_q0);
	} else {
		head->so_qlen++;
		so->so_q = 0;
		/* Walk to the tail of the so_q list. */
		for (prev = &(head->so_q); *prev; )
			prev = &((*prev)->so_q);
	}
	*prev = so;
}
205 | ||
/*
 * Remove socket so from its head's queue: q == 0 means the incomplete
 * queue (so_q0), anything else the completed queue (so_q).
 * Returns 1 if the socket was found and unlinked, 0 if it was not on
 * the selected queue.
 */
soqremque(so, q)
	register struct socket *so;
	int q;
{
	register struct socket *head, *prev, *next;

	head = so->so_head;
	prev = head;
	/* Find the element that precedes so on the selected queue. */
	for (;;) {
		next = q ? prev->so_q : prev->so_q0;
		if (next == so)
			break;
		if (next == 0)
			return (0);	/* not on this queue */
		prev = next;
	}
	/* Unlink and adjust the matching queue count. */
	if (q == 0) {
		prev->so_q0 = next->so_q0;
		head->so_q0len--;
	} else {
		prev->so_q = next->so_q;
		head->so_qlen--;
	}
	next->so_q0 = next->so_q = 0;
	next->so_head = 0;
	return (1);
}
233 | ||
234 | /* | |
235 | * Socantsendmore indicates that no more data will be sent on the | |
236 | * socket; it would normally be applied to a socket when the user | |
237 | * informs the system that no more data is to be sent, by the protocol | |
238 | * code (in case PRU_SHUTDOWN). Socantrcvmore indicates that no more data | |
239 | * will be received, and will normally be applied to the socket by a | |
240 | * protocol when it detects that the peer will send no more data. | |
241 | * Data queued for reading in the socket may yet be read. | |
242 | */ | |
243 | ||
244 | socantsendmore(so) | |
245 | struct socket *so; | |
246 | { | |
247 | ||
248 | so->so_state |= SS_CANTSENDMORE; | |
249 | sowwakeup(so); | |
250 | } | |
251 | ||
252 | socantrcvmore(so) | |
253 | struct socket *so; | |
254 | { | |
255 | ||
256 | so->so_state |= SS_CANTRCVMORE; | |
257 | sorwakeup(so); | |
258 | } | |
259 | ||
260 | /* | |
261 | * Socket select/wakeup routines. | |
262 | */ | |
263 | ||
264 | /* | |
265 | * Queue a process for a select on a socket buffer. | |
266 | */ | |
sbselqueue(sb, cp)
	struct sockbuf *sb;
	struct proc *cp;
{
	struct proc *p;

	/*
	 * Only one selecting process can be recorded in sb_sel.  If one
	 * is already recorded and still sleeping in select (wchan is
	 * selwait), note a collision so all selectors get woken to retry;
	 * otherwise record this process.
	 */
	if (sb->sb_sel && (p = pfind(sb->sb_sel)) && p->p_wchan == (caddr_t)&selwait)
		sb->sb_flags |= SB_COLL;
	else {
		sb->sb_sel = cp->p_pid;
		sb->sb_flags |= SB_SEL;
	}
}
280 | ||
281 | /* | |
282 | * Wait for data to arrive at/drain from a socket buffer. | |
283 | */ | |
284 | sbwait(sb) | |
285 | struct sockbuf *sb; | |
286 | { | |
287 | ||
288 | sb->sb_flags |= SB_WAIT; | |
289 | return (tsleep((caddr_t)&sb->sb_cc, | |
290 | (sb->sb_flags & SB_NOINTR) ? PSOCK : PSOCK | PCATCH, netio, | |
291 | sb->sb_timeo)); | |
292 | } | |
293 | ||
294 | /* | |
295 | * Lock a sockbuf already known to be locked; | |
296 | * return any error returned from sleep (EINTR). | |
297 | */ | |
sb_lock(sb)
	register struct sockbuf *sb;
{
	int error;

	/* Sleep while another holder has the buffer locked. */
	while (sb->sb_flags & SB_LOCK) {
		sb->sb_flags |= SB_WANT;	/* ask the holder to wake us */
		if (error = tsleep((caddr_t)&sb->sb_flags,
		    (sb->sb_flags & SB_NOINTR) ? PSOCK : PSOCK|PCATCH,
		    netio, 0))
			return (error);
	}
	sb->sb_flags |= SB_LOCK;
	return (0);
}
313 | ||
314 | /* | |
315 | * Wakeup processes waiting on a socket buffer. | |
316 | * Do asynchronous notification via SIGIO | |
317 | * if the socket has the SS_ASYNC flag set. | |
318 | */ | |
sowakeup(so, sb)
	register struct socket *so;
	register struct sockbuf *sb;
{
	struct proc *p;

	/* Wake select()ers (including collided ones) and clear the record. */
	if (sb->sb_sel) {
		selwakeup(sb->sb_sel, sb->sb_flags & SB_COLL);
		sb->sb_sel = 0;
		sb->sb_flags &= ~(SB_SEL|SB_COLL);
	}
	/* Wake a process sleeping in sbwait(). */
	if (sb->sb_flags & SB_WAIT) {
		sb->sb_flags &= ~SB_WAIT;
		wakeup((caddr_t)&sb->sb_cc);
	}
	if (so->so_state & SS_ASYNC) {
		/* Negative so_pgid names a process group, positive a process. */
		if (so->so_pgid < 0)
			gsignal(-so->so_pgid, SIGIO);
		else if (so->so_pgid > 0 && (p = pfind(so->so_pgid)) != 0)
			psignal(p, SIGIO);
	}
}
341 | ||
342 | /* | |
343 | * Socket buffer (struct sockbuf) utility routines. | |
344 | * | |
345 | * Each socket contains two socket buffers: one for sending data and | |
346 | * one for receiving data. Each buffer contains a queue of mbufs, | |
347 | * information about the number of mbufs and amount of data in the | |
348 | * queue, and other fields allowing select() statements and notification | |
349 | * on data availability to be implemented. | |
350 | * | |
351 | * Data stored in a socket buffer is maintained as a list of records. | |
352 | * Each record is a list of mbufs chained together with the m_next | |
353 | * field. Records are chained together with the m_nextpkt field. The upper | |
354 | * level routine soreceive() expects the following conventions to be | |
355 | * observed when placing information in the receive buffer: | |
356 | * | |
357 | * 1. If the protocol requires each message be preceded by the sender's | |
358 | * name, then a record containing that name must be present before | |
359 | * any associated data (mbuf's must be of type MT_SONAME). | |
360 | * 2. If the protocol supports the exchange of ``access rights'' (really | |
361 | * just additional data associated with the message), and there are | |
362 | * ``rights'' to be received, then a record containing this data | |
363 | * should be present (mbuf's must be of type MT_RIGHTS). | |
364 | * 3. If a name or rights record exists, then it must be followed by | |
365 | * a data record, perhaps of zero length. | |
366 | * | |
367 | * Before using a new socket structure it is first necessary to reserve | |
368 | * buffer space to the socket, by calling sbreserve(). This should commit | |
369 | * some of the available buffer space in the system buffer pool for the | |
370 | * socket (currently, it does nothing but enforce limits). The space | |
371 | * should be released by calling sbrelease() when the socket is destroyed. | |
372 | */ | |
373 | ||
/*
 * Reserve send and receive buffer space (sndcc and rcvcc bytes) for a
 * socket and establish default low-water marks.
 * Returns 0 on success or ENOBUFS if either reservation fails; on
 * failure of the receive side the send reservation is released again.
 */
soreserve(so, sndcc, rcvcc)
	register struct socket *so;
	u_long sndcc, rcvcc;
{

	if (sbreserve(&so->so_snd, sndcc) == 0)
		goto bad;
	if (sbreserve(&so->so_rcv, rcvcc) == 0)
		goto bad2;
	/* Default low-water marks: 1 byte to read, one cluster to write. */
	if (so->so_rcv.sb_lowat == 0)
		so->so_rcv.sb_lowat = 1;
	if (so->so_snd.sb_lowat == 0)
		so->so_snd.sb_lowat = MCLBYTES;
	/* The low-water mark can never exceed the high-water mark. */
	if (so->so_snd.sb_lowat > so->so_snd.sb_hiwat)
		so->so_snd.sb_lowat = so->so_snd.sb_hiwat;
	return (0);
bad2:
	sbrelease(&so->so_snd);
bad:
	return (ENOBUFS);
}
395 | ||
396 | /* | |
397 | * Allot mbufs to a sockbuf. | |
398 | * Attempt to scale mbmax so that mbcnt doesn't become limiting | |
399 | * if buffering efficiency is near the normal case. | |
400 | */ | |
401 | sbreserve(sb, cc) | |
402 | struct sockbuf *sb; | |
403 | u_long cc; | |
404 | { | |
405 | ||
406 | if (cc > sb_max * MCLBYTES / (MSIZE + MCLBYTES)) | |
407 | return (0); | |
408 | sb->sb_hiwat = cc; | |
409 | sb->sb_mbmax = min(cc * 2, sb_max); | |
410 | if (sb->sb_lowat > sb->sb_hiwat) | |
411 | sb->sb_lowat = sb->sb_hiwat; | |
412 | return (1); | |
413 | } | |
414 | ||
415 | /* | |
416 | * Free mbufs held by a socket, and reserved mbuf space. | |
417 | */ | |
418 | sbrelease(sb) | |
419 | struct sockbuf *sb; | |
420 | { | |
421 | ||
422 | sbflush(sb); | |
423 | sb->sb_hiwat = sb->sb_mbmax = 0; | |
424 | } | |
425 | ||
426 | /* | |
427 | * Routines to add and remove | |
428 | * data from an mbuf queue. | |
429 | * | |
430 | * The routines sbappend() or sbappendrecord() are normally called to | |
431 | * append new mbufs to a socket buffer, after checking that adequate | |
432 | * space is available, comparing the function sbspace() with the amount | |
433 | * of data to be added. sbappendrecord() differs from sbappend() in | |
434 | * that data supplied is treated as the beginning of a new record. | |
435 | * To place a sender's address, optional access rights, and data in a | |
436 | * socket receive buffer, sbappendaddr() should be used. To place | |
437 | * access rights and data in a socket receive buffer, sbappendrights() | |
438 | * should be used. In either case, the new data begins a new record. | |
439 | * Note that unlike sbappend() and sbappendrecord(), these routines check | |
440 | * for the caller that there will be enough space to store the data. | |
441 | * Each fails if there is not enough space, or if it cannot find mbufs | |
442 | * to store additional information in. | |
443 | * | |
444 | * Reliable protocols may use the socket send buffer to hold data | |
445 | * awaiting acknowledgement. Data is normally copied from a socket | |
446 | * send buffer in a protocol with m_copy for output to a peer, | |
447 | * and then removing the data from the socket buffer with sbdrop() | |
448 | * or sbdroprecord() when the data is acknowledged by the peer. | |
449 | */ | |
450 | ||
451 | /* | |
452 | * Append mbuf chain m to the last record in the | |
453 | * socket buffer sb. The additional space associated | |
454 | * the mbuf chain is recorded in sb. Empty mbufs are | |
455 | * discarded and mbufs are compacted where possible. | |
456 | */ | |
sbappend(sb, m)
	struct sockbuf *sb;
	struct mbuf *m;
{
	register struct mbuf *n;

	if (m == 0)
		return;
	if (n = sb->sb_mb) {
		/* Find the last record ... */
		while (n->m_nextpkt)
			n = n->m_nextpkt;
		/*
		 * ... and walk its mbufs; if the record is already
		 * terminated (M_EOR anywhere on it), the new data must
		 * begin a fresh record instead of extending this one.
		 */
		do {
			if (n->m_flags & M_EOR) {
				sbappendrecord(sb, m); /* XXXXXX!!!! */
				return;
			}
		} while (n->m_next && (n = n->m_next));
	}
	/* Append after the last mbuf (n may be 0 if the buffer is empty). */
	sbcompress(sb, m, n);
}
477 | ||
#ifdef SOCKBUF_DEBUG
/*
 * Debug consistency check: recompute the byte count and mbuf storage
 * of a socket buffer and panic if they disagree with the cached
 * sb_cc/sb_mbcnt.  Panics if the buffer holds more than one record.
 */
sbcheck(sb)
	register struct sockbuf *sb;
{
	register struct mbuf *m;
	register int len = 0, mbcnt = 0;

	for (m = sb->sb_mb; m; m = m->m_next) {
		len += m->m_len;
		mbcnt += MSIZE;
		/* External (cluster) storage counts toward sb_mbcnt too. */
		if (m->m_flags & M_EXT)
			mbcnt += m->m_ext.ext_size;
		if (m->m_nextpkt)
			panic("sbcheck nextpkt");
	}
	if (len != sb->sb_cc || mbcnt != sb->sb_mbcnt) {
		printf("cc %d != %d || mbcnt %d != %d\n", len, sb->sb_cc,
		    mbcnt, sb->sb_mbcnt);
		panic("sbcheck");
	}
}
#endif
500 | ||
501 | /* | |
502 | * As above, except the mbuf chain | |
503 | * begins a new record. | |
504 | */ | |
sbappendrecord(sb, m0)
	register struct sockbuf *sb;
	register struct mbuf *m0;
{
	register struct mbuf *m;

	if (m0 == 0)
		return;
	/* Find the last record already in the buffer, if any. */
	if (m = sb->sb_mb)
		while (m->m_nextpkt)
			m = m->m_nextpkt;
	/*
	 * Put the first mbuf on the queue.
	 * Note this permits zero length records.
	 */
	sballoc(sb, m0);
	if (m)
		m->m_nextpkt = m0;
	else
		sb->sb_mb = m0;
	m = m0->m_next;
	m0->m_next = 0;
	/* M_EOR belongs on the record's last mbuf, not its first. */
	if (m && (m0->m_flags & M_EOR)) {
		m0->m_flags &= ~M_EOR;
		m->m_flags |= M_EOR;
	}
	/* Append the rest of the chain, compacting into m0 where possible. */
	sbcompress(sb, m, m0);
}
533 | ||
534 | /* | |
535 | * As above except that OOB data | |
536 | * is inserted at the beginning of the sockbuf, | |
537 | * but after any other OOB data. | |
538 | */ | |
sbinsertoob(sb, m0)
	register struct sockbuf *sb;
	register struct mbuf *m0;
{
	register struct mbuf *m;
	register struct mbuf **mp;

	if (m0 == 0)
		return;
	/*
	 * Find the insertion point: skip over whole records of OOB data,
	 * and look past a record's leading control mbufs to see whether
	 * it too is OOB data.
	 */
	for (mp = &sb->sb_mb; m = *mp; mp = &((*mp)->m_nextpkt)) {
	    again:
		switch (m->m_type) {

		case MT_OOBDATA:
			continue;		/* WANT next train */

		case MT_CONTROL:
			if (m = m->m_next)
				goto again;	/* inspect THIS train further */
		}
		break;
	}
	/*
	 * Put the first mbuf on the queue.
	 * Note this permits zero length records.
	 */
	sballoc(sb, m0);
	m0->m_nextpkt = *mp;
	*mp = m0;
	m = m0->m_next;
	m0->m_next = 0;
	/* M_EOR belongs on the record's last mbuf, not its first. */
	if (m && (m0->m_flags & M_EOR)) {
		m0->m_flags &= ~M_EOR;
		m->m_flags |= M_EOR;
	}
	sbcompress(sb, m, m0);
}
576 | ||
577 | /* | |
578 | * Append address and data, and optionally, control (ancillary) data | |
579 | * to the receive queue of a socket. If present, | |
580 | * m0 must include a packet header with total length. | |
581 | * Returns 0 if no space in sockbuf or insufficient mbufs. | |
582 | */ | |
sbappendaddr(sb, asa, m0, control)
	register struct sockbuf *sb;
	struct sockaddr *asa;
	struct mbuf *m0, *control;
{
	register struct mbuf *m, *n;
	int space = asa->sa_len;

	if (m0 && (m0->m_flags & M_PKTHDR) == 0)
		panic("sbappendaddr");
	/* Total the space needed: address + data + control. */
	if (m0)
		space += m0->m_pkthdr.len;
	for (n = control; n; n = n->m_next) {
		space += n->m_len;
		if (n->m_next == 0)	/* keep pointer to last control buf */
			break;
	}
	if (space > sbspace(sb))
		return (0);
	/* The address must fit in a single small mbuf. */
	if (asa->sa_len > MLEN)
		return (0);
	MGET(m, M_DONTWAIT, MT_SONAME);
	if (m == 0)
		return (0);
	m->m_len = asa->sa_len;
	bcopy((caddr_t)asa, mtod(m, caddr_t), asa->sa_len);
	if (n)
		n->m_next = m0;		/* concatenate data to control */
	else
		control = m0;
	m->m_next = control;
	/* Charge the whole record (name, control, data) to the buffer. */
	for (n = m; n; n = n->m_next)
		sballoc(sb, n);
	/* Link the new record at the tail of the record list. */
	if (n = sb->sb_mb) {
		while (n->m_nextpkt)
			n = n->m_nextpkt;
		n->m_nextpkt = m;
	} else
		sb->sb_mb = m;
	return (1);
}
624 | ||
/*
 * Append control (ancillary) data, with optional data m0 concatenated
 * after it, as a new record on a socket buffer.  control must be
 * non-null (panics otherwise).
 * Returns 0 if there is insufficient space in the sockbuf, 1 on success.
 */
sbappendcontrol(sb, m0, control)
	struct sockbuf *sb;
	struct mbuf *control, *m0;
{
	register struct mbuf *m, *n;
	int space = 0;

	if (control == 0)
		panic("sbappendcontrol");
	/* Total the control chain's length, remembering its last mbuf. */
	for (m = control; ; m = m->m_next) {
		space += m->m_len;
		if (m->m_next == 0)
			break;
	}
	n = m;			/* save pointer to last control buffer */
	for (m = m0; m; m = m->m_next)
		space += m->m_len;
	if (space > sbspace(sb))
		return (0);
	n->m_next = m0;			/* concatenate data to control */
	/* Charge the whole record (control + data, now one chain). */
	for (m = control; m; m = m->m_next)
		sballoc(sb, m);
	/* Link the new record at the tail of the record list. */
	if (n = sb->sb_mb) {
		while (n->m_nextpkt)
			n = n->m_nextpkt;
		n->m_nextpkt = control;
	} else
		sb->sb_mb = control;
	return (1);
}
655 | ||
656 | /* | |
657 | * Compress mbuf chain m into the socket | |
658 | * buffer sb following mbuf n. If n | |
659 | * is null, the buffer is presumed empty. | |
660 | */ | |
sbcompress(sb, m, n)
	register struct sockbuf *sb;
	register struct mbuf *m, *n;
{
	register int eor = 0;
	register struct mbuf *o;

	while (m) {
		eor |= m->m_flags & M_EOR;
		/*
		 * Discard an empty mbuf, unless it carries the only copy
		 * of an end-of-record mark and there is no same-type
		 * successor (next in chain, else tail n) to move it to.
		 */
		if (m->m_len == 0 &&
		    (eor == 0 ||
		    (((o = m->m_next) || (o = n)) &&
		    o->m_type == m->m_type))) {
			m = m_free(m);
			continue;
		}
		/*
		 * Copy the data into the tail mbuf n when n uses internal
		 * storage (no cluster), does not end a record, has room
		 * in m_dat, and holds the same type of data.
		 */
		if (n && (n->m_flags & (M_EXT | M_EOR)) == 0 &&
		    (n->m_data + n->m_len + m->m_len) < &n->m_dat[MLEN] &&
		    n->m_type == m->m_type) {
			bcopy(mtod(m, caddr_t), mtod(n, caddr_t) + n->m_len,
			    (unsigned)m->m_len);
			n->m_len += m->m_len;
			sb->sb_cc += m->m_len;
			m = m_free(m);
			continue;
		}
		/* Otherwise link the mbuf itself into the buffer. */
		if (n)
			n->m_next = m;
		else
			sb->sb_mb = m;
		sballoc(sb, m);
		n = m;
		m->m_flags &= ~M_EOR;	/* restored below on the final mbuf */
		m = m->m_next;
		n->m_next = 0;
	}
	/* Re-apply any end-of-record mark to the last mbuf retained. */
	if (eor) {
		if (n)
			n->m_flags |= eor;
		else
			printf("semi-panic: sbcompress\n");
	}
}
704 | ||
705 | /* | |
706 | * Free all mbufs in a sockbuf. | |
707 | * Check that all resources are reclaimed. | |
708 | */ | |
sbflush(sb)
	register struct sockbuf *sb;
{

	/* Callers must not hold the sockbuf lock while flushing. */
	if (sb->sb_flags & SB_LOCK)
		panic("sbflush");
	/* Drop data (record by record via sbdrop) until no storage remains. */
	while (sb->sb_mbcnt)
		sbdrop(sb, (int)sb->sb_cc);
	/* Everything should now be accounted for and freed. */
	if (sb->sb_cc || sb->sb_mb)
		panic("sbflush 2");
}
720 | ||
721 | /* | |
722 | * Drop data from (the front of) a sockbuf. | |
723 | */ | |
sbdrop(sb, len)
	register struct sockbuf *sb;
	register int len;
{
	register struct mbuf *m, *mn;
	struct mbuf *next;

	/* next tracks the record that follows the one being trimmed. */
	next = (m = sb->sb_mb) ? m->m_nextpkt : 0;
	while (len > 0) {
		if (m == 0) {
			/* Current record exhausted: advance to the next one. */
			if (next == 0)
				panic("sbdrop");
			m = next;
			next = m->m_nextpkt;
			continue;
		}
		if (m->m_len > len) {
			/* Partial mbuf: trim from the front and stop. */
			m->m_len -= len;
			m->m_data += len;
			sb->sb_cc -= len;
			break;
		}
		/* Whole mbuf consumed: uncharge and free it. */
		len -= m->m_len;
		sbfree(sb, m);
		MFREE(m, mn);
		m = mn;
	}
	/* Also discard any now-leading empty mbufs of this record. */
	while (m && m->m_len == 0) {
		sbfree(sb, m);
		MFREE(m, mn);
		m = mn;
	}
	/* Reattach the remainder of the record (if any) to the record list. */
	if (m) {
		sb->sb_mb = m;
		m->m_nextpkt = next;
	} else
		sb->sb_mb = next;
}
762 | ||
763 | /* | |
764 | * Drop a record off the front of a sockbuf | |
765 | * and move the next record to the front. | |
766 | */ | |
767 | sbdroprecord(sb) | |
768 | register struct sockbuf *sb; | |
769 | { | |
770 | register struct mbuf *m, *mn; | |
771 | ||
772 | m = sb->sb_mb; | |
773 | if (m) { | |
774 | sb->sb_mb = m->m_nextpkt; | |
775 | do { | |
776 | sbfree(sb, m); | |
777 | MFREE(m, mn); | |
778 | } while (m = mn); | |
779 | } | |
780 | } |