Commit | Line | Data |
---|---|---|
15637ed4 RG |
1 | /* |
2 | * Copyright (c) 1982, 1986, 1988, 1990 Regents of the University of California. | |
3 | * All rights reserved. | |
4 | * | |
5 | * Redistribution and use in source and binary forms, with or without | |
6 | * modification, are permitted provided that the following conditions | |
7 | * are met: | |
8 | * 1. Redistributions of source code must retain the above copyright | |
9 | * notice, this list of conditions and the following disclaimer. | |
10 | * 2. Redistributions in binary form must reproduce the above copyright | |
11 | * notice, this list of conditions and the following disclaimer in the | |
12 | * documentation and/or other materials provided with the distribution. | |
13 | * 3. All advertising materials mentioning features or use of this software | |
14 | * must display the following acknowledgement: | |
15 | * This product includes software developed by the University of | |
16 | * California, Berkeley and its contributors. | |
17 | * 4. Neither the name of the University nor the names of its contributors | |
18 | * may be used to endorse or promote products derived from this software | |
19 | * without specific prior written permission. | |
20 | * | |
21 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND | |
22 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |
23 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | |
24 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | |
25 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | |
26 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | |
27 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | |
28 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | |
29 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | |
30 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | |
31 | * SUCH DAMAGE. | |
32 | * | |
600f7f07 | 33 | * from: @(#)uipc_socket2.c 7.17 (Berkeley) 5/4/91 |
fde1aeb2 | 34 | * $Id: uipc_socket2.c,v 1.3 1993/11/25 01:33:35 wollman Exp $ |
15637ed4 RG |
35 | */ |
36 | ||
37 | #include "param.h" | |
38 | #include "systm.h" | |
39 | #include "proc.h" | |
40 | #include "file.h" | |
41 | #include "buf.h" | |
42 | #include "malloc.h" | |
43 | #include "mbuf.h" | |
44 | #include "protosw.h" | |
45 | #include "socket.h" | |
46 | #include "socketvar.h" | |
47 | ||
48 | /* | |
49 | * Primitive routines for operating on sockets and socket buffers | |
50 | */ | |
51 | ||
/* strings for sleep message: */
const char netio[] = "netio";	/* used by sbwait()/sb_lock() below */
const char netcon[] = "netcon";	/* NOTE(review): not referenced in this file; presumably used by connect-wait paths -- confirm */
const char netcls[] = "netcls";	/* NOTE(review): not referenced in this file; presumably used by close-wait paths -- confirm */

u_long sb_max = SB_MAX; /* patchable */
58 | ||
59 | /* | |
60 | * Procedures to manipulate state flags of socket | |
61 | * and do appropriate wakeups. Normal sequence from the | |
62 | * active (originating) side is that soisconnecting() is | |
63 | * called during processing of connect() call, | |
64 | * resulting in an eventual call to soisconnected() if/when the | |
65 | * connection is established. When the connection is torn down | |
66 | * soisdisconnecting() is called during processing of disconnect() call, | |
67 | * and soisdisconnected() is called when the connection to the peer | |
68 | * is totally severed. The semantics of these routines are such that | |
69 | * connectionless protocols can call soisconnected() and soisdisconnected() | |
70 | * only, bypassing the in-progress calls when setting up a ``connection'' | |
71 | * takes no time. | |
72 | * | |
73 | * From the passive side, a socket is created with | |
74 | * two queues of sockets: so_q0 for connections in progress | |
75 | * and so_q for connections already made and awaiting user acceptance. | |
76 | * As a protocol is preparing incoming connections, it creates a socket | |
77 | * structure queued on so_q0 by calling sonewconn(). When the connection | |
78 | * is established, soisconnected() is called, and transfers the | |
79 | * socket structure to so_q, making it available to accept(). | |
80 | * | |
81 | * If a socket is closed with sockets on either | |
82 | * so_q0 or so_q, these sockets are dropped. | |
83 | * | |
84 | * If higher level protocols are implemented in | |
85 | * the kernel, the wakeups done here will sometimes | |
86 | * cause software-interrupt process scheduling. | |
87 | */ | |
88 | ||
4c45483e | 89 | void |
15637ed4 RG |
90 | soisconnecting(so) |
91 | register struct socket *so; | |
92 | { | |
93 | ||
94 | so->so_state &= ~(SS_ISCONNECTED|SS_ISDISCONNECTING); | |
95 | so->so_state |= SS_ISCONNECTING; | |
96 | } | |
97 | ||
/*
 * Record that the connection on `so' is now fully established.
 * If the socket sits on a listening socket's incomplete queue (so_q0),
 * it is moved to the completed queue (so_q) and anyone blocked in
 * accept() on the head socket is awakened; otherwise the connecting
 * process itself and any readers/writers on `so' are awakened.
 */
void
soisconnected(so)
	register struct socket *so;
{
	register struct socket *head = so->so_head;

	so->so_state &= ~(SS_ISCONNECTING|SS_ISDISCONNECTING|SS_ISCONFIRMING);
	so->so_state |= SS_ISCONNECTED;
	if (head && soqremque(so, 0)) {
		/* was on so_q0: promote to so_q and notify accept()ers */
		soqinsque(head, so, 1);
		sorwakeup(head);
		wakeup((caddr_t)&head->so_timeo);
	} else {
		/* no listening head (or not queued): wake the connector */
		wakeup((caddr_t)&so->so_timeo);
		sorwakeup(so);
		sowwakeup(so);
	}
}
116 | ||
4c45483e | 117 | void |
15637ed4 RG |
118 | soisdisconnecting(so) |
119 | register struct socket *so; | |
120 | { | |
121 | ||
122 | so->so_state &= ~SS_ISCONNECTING; | |
123 | so->so_state |= (SS_ISDISCONNECTING|SS_CANTRCVMORE|SS_CANTSENDMORE); | |
124 | wakeup((caddr_t)&so->so_timeo); | |
125 | sowwakeup(so); | |
126 | sorwakeup(so); | |
127 | } | |
128 | ||
4c45483e | 129 | void |
15637ed4 RG |
130 | soisdisconnected(so) |
131 | register struct socket *so; | |
132 | { | |
133 | ||
134 | so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING); | |
135 | so->so_state |= (SS_CANTRCVMORE|SS_CANTSENDMORE); | |
136 | wakeup((caddr_t)&so->so_timeo); | |
137 | sowwakeup(so); | |
138 | sorwakeup(so); | |
139 | } | |
140 | ||
141 | /* | |
142 | * When an attempt at a new connection is noted on a socket | |
143 | * which accepts connections, sonewconn is called. If the | |
144 | * connection is possible (subject to space constraints, etc.) | |
 145 | * then we allocate a new structure, properly linked into the | |
146 | * data structure of the original socket, and return this. | |
147 | * Connstatus may be 0, or SO_ISCONFIRMING, or SO_ISCONNECTED. | |
148 | * | |
149 | * Currently, sonewconn() is defined as sonewconn1() in socketvar.h | |
150 | * to catch calls that are missing the (new) second parameter. | |
151 | */ | |
/*
 * Allocate a socket for an incoming connection on listening socket
 * `head'.  The new socket inherits type, options (minus SO_ACCEPTCONN),
 * linger, protocol, and buffer reservations from `head' and is queued
 * on so_q0 (incomplete) or, when `connstatus' is nonzero, on so_q
 * (completed), in which case accept()ers on `head' are awakened.
 * Returns the new socket, or a null pointer if the backlog is full or
 * allocation/protocol attach fails.
 */
struct socket *
sonewconn1(head, connstatus)
	register struct socket *head;
	int connstatus;
{
	register struct socket *so;
	int soqueue = connstatus ? 1 : 0;

	/* refuse when the pending queues exceed 3/2 of the listen limit */
	if (head->so_qlen + head->so_q0len > 3 * head->so_qlimit / 2)
		return ((struct socket *)0);
	MALLOC(so, struct socket *, sizeof(*so), M_SOCKET, M_DONTWAIT);
	if (so == NULL)
		return ((struct socket *)0);
	bzero((caddr_t)so, sizeof(*so));
	/* clone the listening socket's relevant state */
	so->so_type = head->so_type;
	so->so_options = head->so_options &~ SO_ACCEPTCONN;
	so->so_linger = head->so_linger;
	so->so_state = head->so_state | SS_NOFDREF;
	so->so_proto = head->so_proto;
	so->so_timeo = head->so_timeo;
	so->so_pgid = head->so_pgid;
	(void) soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat);
	soqinsque(head, so, soqueue);
	/* let the protocol attach; on failure undo the queue insert */
	if ((*so->so_proto->pr_usrreq)(so, PRU_ATTACH,
	    (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0,
	    (struct mbuf *)0)) {
		(void) soqremque(so, soqueue);
		(void) free((caddr_t)so, M_SOCKET);
		return ((struct socket *)0);
	}
	if (connstatus) {
		/* already complete: notify accept()ers on the head socket */
		sorwakeup(head);
		wakeup((caddr_t)&head->so_timeo);
		so->so_state |= connstatus;
	}
	return (so);
}
189 | ||
/*
 * Append socket `so' to the tail of one of `head's connection queues:
 * so_q0 (incomplete connections) when q == 0, so_q (completed,
 * awaiting accept()) otherwise.  The matching queue length counter on
 * `head' is bumped and so->so_head is pointed back at the listener.
 */
void
soqinsque(head, so, q)
	register struct socket *head, *so;
	int q;
{

	register struct socket **prev;
	so->so_head = head;
	if (q == 0) {
		head->so_q0len++;
		so->so_q0 = 0;
		/* walk the so_q0 links to find the tail slot */
		for (prev = &(head->so_q0); *prev; )
			prev = &((*prev)->so_q0);
	} else {
		head->so_qlen++;
		so->so_q = 0;
		/* walk the so_q links to find the tail slot */
		for (prev = &(head->so_q); *prev; )
			prev = &((*prev)->so_q);
	}
	*prev = so;
}
211 | ||
/*
 * Remove socket `so' from its head socket's queue: so_q0 when q == 0,
 * so_q otherwise.  Returns 1 if the socket was found and unlinked
 * (its queue links and so_head are cleared), 0 if it was not on the
 * named queue.
 */
int
soqremque(so, q)
	register struct socket *so;
	int q;
{
	register struct socket *head, *prev, *next;

	head = so->so_head;
	prev = head;
	/* find `so', keeping `prev' one node behind for the unlink */
	for (;;) {
		next = q ? prev->so_q : prev->so_q0;
		if (next == so)
			break;
		if (next == 0)
			return (0);
		prev = next;
	}
	if (q == 0) {
		prev->so_q0 = next->so_q0;
		head->so_q0len--;
	} else {
		prev->so_q = next->so_q;
		head->so_qlen--;
	}
	next->so_q0 = next->so_q = 0;
	next->so_head = 0;
	return (1);
}
240 | ||
241 | /* | |
242 | * Socantsendmore indicates that no more data will be sent on the | |
243 | * socket; it would normally be applied to a socket when the user | |
244 | * informs the system that no more data is to be sent, by the protocol | |
245 | * code (in case PRU_SHUTDOWN). Socantrcvmore indicates that no more data | |
246 | * will be received, and will normally be applied to the socket by a | |
247 | * protocol when it detects that the peer will send no more data. | |
248 | * Data queued for reading in the socket may yet be read. | |
249 | */ | |
250 | ||
4c45483e | 251 | void |
15637ed4 RG |
252 | socantsendmore(so) |
253 | struct socket *so; | |
254 | { | |
255 | ||
256 | so->so_state |= SS_CANTSENDMORE; | |
257 | sowwakeup(so); | |
258 | } | |
259 | ||
4c45483e | 260 | void |
15637ed4 RG |
261 | socantrcvmore(so) |
262 | struct socket *so; | |
263 | { | |
264 | ||
265 | so->so_state |= SS_CANTRCVMORE; | |
266 | sorwakeup(so); | |
267 | } | |
268 | ||
269 | /* | |
270 | * Socket select/wakeup routines. | |
271 | */ | |
272 | ||
273 | /* | |
274 | * Queue a process for a select on a socket buffer. | |
275 | */ | |
4c45483e | 276 | void |
15637ed4 RG |
277 | sbselqueue(sb, cp) |
278 | struct sockbuf *sb; | |
279 | struct proc *cp; | |
280 | { | |
281 | struct proc *p; | |
282 | ||
283 | if (sb->sb_sel && (p = pfind(sb->sb_sel)) && p->p_wchan == (caddr_t)&selwait) | |
284 | sb->sb_flags |= SB_COLL; | |
285 | else { | |
286 | sb->sb_sel = cp->p_pid; | |
287 | sb->sb_flags |= SB_SEL; | |
288 | } | |
289 | } | |
290 | ||
291 | /* | |
292 | * Wait for data to arrive at/drain from a socket buffer. | |
293 | */ | |
4c45483e | 294 | int |
15637ed4 RG |
295 | sbwait(sb) |
296 | struct sockbuf *sb; | |
297 | { | |
298 | ||
299 | sb->sb_flags |= SB_WAIT; | |
300 | return (tsleep((caddr_t)&sb->sb_cc, | |
301 | (sb->sb_flags & SB_NOINTR) ? PSOCK : PSOCK | PCATCH, netio, | |
302 | sb->sb_timeo)); | |
303 | } | |
304 | ||
305 | /* | |
306 | * Lock a sockbuf already known to be locked; | |
307 | * return any error returned from sleep (EINTR). | |
308 | */ | |
4c45483e | 309 | int |
15637ed4 RG |
310 | sb_lock(sb) |
311 | register struct sockbuf *sb; | |
312 | { | |
313 | int error; | |
314 | ||
315 | while (sb->sb_flags & SB_LOCK) { | |
316 | sb->sb_flags |= SB_WANT; | |
317 | if (error = tsleep((caddr_t)&sb->sb_flags, | |
318 | (sb->sb_flags & SB_NOINTR) ? PSOCK : PSOCK|PCATCH, | |
319 | netio, 0)) | |
320 | return (error); | |
321 | } | |
322 | sb->sb_flags |= SB_LOCK; | |
323 | return (0); | |
324 | } | |
325 | ||
326 | /* | |
327 | * Wakeup processes waiting on a socket buffer. | |
328 | * Do asynchronous notification via SIGIO | |
329 | * if the socket has the SS_ASYNC flag set. | |
330 | */ | |
/*
 * Wake everything waiting on socket buffer `sb' of socket `so':
 * any selector (clearing the select state), any process blocked in
 * sbwait(), and -- if SS_ASYNC is set -- deliver SIGIO to the owning
 * process (so_pgid > 0) or process group (so_pgid < 0).
 */
void
sowakeup(so, sb)
	register struct socket *so;
	register struct sockbuf *sb;
{
	struct proc *p;

	if (sb->sb_sel) {
		/* wake the selector (and colliders), then forget them */
		selwakeup(sb->sb_sel, sb->sb_flags & SB_COLL);
		sb->sb_sel = 0;
		sb->sb_flags &= ~(SB_SEL|SB_COLL);
	}
	if (sb->sb_flags & SB_WAIT) {
		sb->sb_flags &= ~SB_WAIT;
		wakeup((caddr_t)&sb->sb_cc);
	}
	if (so->so_state & SS_ASYNC) {
		/* negative pgid means a process group, positive a pid */
		if (so->so_pgid < 0)
			gsignal(-so->so_pgid, SIGIO);
		else if (so->so_pgid > 0 && (p = pfind(so->so_pgid)) != 0)
			psignal(p, SIGIO);
	}
}
354 | ||
355 | /* | |
356 | * Socket buffer (struct sockbuf) utility routines. | |
357 | * | |
358 | * Each socket contains two socket buffers: one for sending data and | |
359 | * one for receiving data. Each buffer contains a queue of mbufs, | |
360 | * information about the number of mbufs and amount of data in the | |
361 | * queue, and other fields allowing select() statements and notification | |
362 | * on data availability to be implemented. | |
363 | * | |
364 | * Data stored in a socket buffer is maintained as a list of records. | |
365 | * Each record is a list of mbufs chained together with the m_next | |
366 | * field. Records are chained together with the m_nextpkt field. The upper | |
367 | * level routine soreceive() expects the following conventions to be | |
368 | * observed when placing information in the receive buffer: | |
369 | * | |
370 | * 1. If the protocol requires each message be preceded by the sender's | |
371 | * name, then a record containing that name must be present before | |
372 | * any associated data (mbuf's must be of type MT_SONAME). | |
373 | * 2. If the protocol supports the exchange of ``access rights'' (really | |
374 | * just additional data associated with the message), and there are | |
375 | * ``rights'' to be received, then a record containing this data | |
376 | * should be present (mbuf's must be of type MT_RIGHTS). | |
377 | * 3. If a name or rights record exists, then it must be followed by | |
378 | * a data record, perhaps of zero length. | |
379 | * | |
380 | * Before using a new socket structure it is first necessary to reserve | |
381 | * buffer space to the socket, by calling sbreserve(). This should commit | |
382 | * some of the available buffer space in the system buffer pool for the | |
383 | * socket (currently, it does nothing but enforce limits). The space | |
384 | * should be released by calling sbrelease() when the socket is destroyed. | |
385 | */ | |
386 | ||
4c45483e | 387 | int |
15637ed4 RG |
388 | soreserve(so, sndcc, rcvcc) |
389 | register struct socket *so; | |
390 | u_long sndcc, rcvcc; | |
391 | { | |
392 | ||
393 | if (sbreserve(&so->so_snd, sndcc) == 0) | |
394 | goto bad; | |
395 | if (sbreserve(&so->so_rcv, rcvcc) == 0) | |
396 | goto bad2; | |
397 | if (so->so_rcv.sb_lowat == 0) | |
398 | so->so_rcv.sb_lowat = 1; | |
399 | if (so->so_snd.sb_lowat == 0) | |
400 | so->so_snd.sb_lowat = MCLBYTES; | |
401 | if (so->so_snd.sb_lowat > so->so_snd.sb_hiwat) | |
402 | so->so_snd.sb_lowat = so->so_snd.sb_hiwat; | |
403 | return (0); | |
404 | bad2: | |
405 | sbrelease(&so->so_snd); | |
406 | bad: | |
407 | return (ENOBUFS); | |
408 | } | |
409 | ||
410 | /* | |
411 | * Allot mbufs to a sockbuf. | |
412 | * Attempt to scale mbmax so that mbcnt doesn't become limiting | |
413 | * if buffering efficiency is near the normal case. | |
414 | */ | |
4c45483e | 415 | int |
15637ed4 RG |
416 | sbreserve(sb, cc) |
417 | struct sockbuf *sb; | |
418 | u_long cc; | |
419 | { | |
420 | ||
421 | if (cc > sb_max * MCLBYTES / (MSIZE + MCLBYTES)) | |
422 | return (0); | |
423 | sb->sb_hiwat = cc; | |
424 | sb->sb_mbmax = min(cc * 2, sb_max); | |
425 | if (sb->sb_lowat > sb->sb_hiwat) | |
426 | sb->sb_lowat = sb->sb_hiwat; | |
427 | return (1); | |
428 | } | |
429 | ||
430 | /* | |
431 | * Free mbufs held by a socket, and reserved mbuf space. | |
432 | */ | |
4c45483e | 433 | void |
15637ed4 RG |
434 | sbrelease(sb) |
435 | struct sockbuf *sb; | |
436 | { | |
437 | ||
438 | sbflush(sb); | |
439 | sb->sb_hiwat = sb->sb_mbmax = 0; | |
440 | } | |
441 | ||
442 | /* | |
443 | * Routines to add and remove | |
444 | * data from an mbuf queue. | |
445 | * | |
446 | * The routines sbappend() or sbappendrecord() are normally called to | |
447 | * append new mbufs to a socket buffer, after checking that adequate | |
448 | * space is available, comparing the function sbspace() with the amount | |
449 | * of data to be added. sbappendrecord() differs from sbappend() in | |
450 | * that data supplied is treated as the beginning of a new record. | |
451 | * To place a sender's address, optional access rights, and data in a | |
452 | * socket receive buffer, sbappendaddr() should be used. To place | |
453 | * access rights and data in a socket receive buffer, sbappendrights() | |
454 | * should be used. In either case, the new data begins a new record. | |
455 | * Note that unlike sbappend() and sbappendrecord(), these routines check | |
456 | * for the caller that there will be enough space to store the data. | |
457 | * Each fails if there is not enough space, or if it cannot find mbufs | |
458 | * to store additional information in. | |
459 | * | |
460 | * Reliable protocols may use the socket send buffer to hold data | |
461 | * awaiting acknowledgement. Data is normally copied from a socket | |
462 | * send buffer in a protocol with m_copy for output to a peer, | |
463 | * and then removing the data from the socket buffer with sbdrop() | |
464 | * or sbdroprecord() when the data is acknowledged by the peer. | |
465 | */ | |
466 | ||
467 | /* | |
468 | * Append mbuf chain m to the last record in the | |
469 | * socket buffer sb. The additional space associated | |
470 | * the mbuf chain is recorded in sb. Empty mbufs are | |
471 | * discarded and mbufs are compacted where possible. | |
472 | */ | |
/*
 * Append mbuf chain `m' to the last record in sockbuf `sb',
 * compacting where possible.  If the last record is already
 * terminated (M_EOR seen on any of its mbufs), the chain starts a
 * new record via sbappendrecord() instead.
 */
void
sbappend(sb, m)
	struct sockbuf *sb;
	struct mbuf *m;
{
	register struct mbuf *n;

	if (m == 0)
		return;
	if (n = sb->sb_mb) {
		/* find the last record ... */
		while (n->m_nextpkt)
			n = n->m_nextpkt;
		/* ... and scan it; an M_EOR mbuf means the record is closed */
		do {
			if (n->m_flags & M_EOR) {
				sbappendrecord(sb, m); /* XXXXXX!!!! */
				return;
			}
		} while (n->m_next && (n = n->m_next));
	}
	/* n is the last mbuf of the last record (or null if sb is empty) */
	sbcompress(sb, m, n);
}
494 | ||
495 | #ifdef SOCKBUF_DEBUG | |
/*
 * Debug-only consistency check: verify that sb_cc and sb_mbcnt match
 * the totals obtained by walking the mbufs, panicking on mismatch.
 * NOTE(review): only the first record (m_next chain) is walked; any
 * m_nextpkt link triggers the "nextpkt" panic -- confirm this is the
 * intended invariant for buffers checked here.
 */
sbcheck(sb)
	register struct sockbuf *sb;
{
	register struct mbuf *m;
	register int len = 0, mbcnt = 0;

	for (m = sb->sb_mb; m; m = m->m_next) {
		len += m->m_len;
		mbcnt += MSIZE;
		if (m->m_flags & M_EXT)
			mbcnt += m->m_ext.ext_size;
		if (m->m_nextpkt)
			panic("sbcheck nextpkt");
	}
	if (len != sb->sb_cc || mbcnt != sb->sb_mbcnt) {
		printf("cc %d != %d || mbcnt %d != %d\n", len, sb->sb_cc,
		    mbcnt, sb->sb_mbcnt);
		panic("sbcheck");
	}
}
516 | #endif | |
517 | ||
518 | /* | |
519 | * As above, except the mbuf chain | |
520 | * begins a new record. | |
521 | */ | |
/*
 * As sbappend(), except the mbuf chain `m0' begins a new record
 * appended after the last record in `sb'.
 */
void
sbappendrecord(sb, m0)
	register struct sockbuf *sb;
	register struct mbuf *m0;
{
	register struct mbuf *m;

	if (m0 == 0)
		return;
	/* locate the current last record, if any */
	if (m = sb->sb_mb)
		while (m->m_nextpkt)
			m = m->m_nextpkt;
	/*
	 * Put the first mbuf on the queue.
	 * Note this permits zero length records.
	 */
	sballoc(sb, m0);
	if (m)
		m->m_nextpkt = m0;
	else
		sb->sb_mb = m0;
	/* compress the remainder of the chain onto the record head */
	m = m0->m_next;
	m0->m_next = 0;
	if (m && (m0->m_flags & M_EOR)) {
		/* keep M_EOR on the record's final mbuf, not its head */
		m0->m_flags &= ~M_EOR;
		m->m_flags |= M_EOR;
	}
	sbcompress(sb, m, m0);
}
551 | ||
552 | /* | |
553 | * As above except that OOB data | |
554 | * is inserted at the beginning of the sockbuf, | |
555 | * but after any other OOB data. | |
556 | */ | |
/*
 * As sbappendrecord(), except the record `m0' holds out-of-band data
 * and is inserted at the front of `sb' -- but after any OOB data
 * already present (and within any leading control record).
 */
void
sbinsertoob(sb, m0)
	register struct sockbuf *sb;
	register struct mbuf *m0;
{
	register struct mbuf *m;
	register struct mbuf **mp;

	if (m0 == 0)
		return;
	/* skip past existing OOB records (and look inside control) */
	for (mp = &sb->sb_mb; m = *mp; mp = &((*mp)->m_nextpkt)) {
	    again:
		switch (m->m_type) {

		case MT_OOBDATA:
			continue;	/* WANT next train */

		case MT_CONTROL:
			if (m = m->m_next)
				goto again;	/* inspect THIS train further */
		}
		break;
	}
	/*
	 * Put the first mbuf on the queue.
	 * Note this permits zero length records.
	 */
	sballoc(sb, m0);
	m0->m_nextpkt = *mp;
	*mp = m0;
	/* compress the remainder of the chain onto the record head */
	m = m0->m_next;
	m0->m_next = 0;
	if (m && (m0->m_flags & M_EOR)) {
		/* keep M_EOR on the record's final mbuf, not its head */
		m0->m_flags &= ~M_EOR;
		m->m_flags |= M_EOR;
	}
	sbcompress(sb, m, m0);
}
595 | ||
596 | /* | |
597 | * Append address and data, and optionally, control (ancillary) data | |
598 | * to the receive queue of a socket. If present, | |
599 | * m0 must include a packet header with total length. | |
600 | * Returns 0 if no space in sockbuf or insufficient mbufs. | |
601 | */ | |
/*
 * Append a record consisting of sender address `asa', optional
 * control (ancillary) data `control', and data `m0' to the receive
 * queue `sb'.  If present, m0 must carry a packet header with the
 * total length.  Returns 0 if there is no room in the sockbuf or no
 * mbuf for the name, 1 on success.
 */
int
sbappendaddr(sb, asa, m0, control)
	register struct sockbuf *sb;
	struct sockaddr *asa;
	struct mbuf *m0, *control;
{
	register struct mbuf *m, *n;
	int space = asa->sa_len;

	if (m0 && (m0->m_flags & M_PKTHDR) == 0)
		panic("sbappendaddr");
	if (m0)
		space += m0->m_pkthdr.len;
	for (n = control; n; n = n->m_next) {
		space += n->m_len;
		if (n->m_next == 0)	/* keep pointer to last control buf */
			break;
	}
	if (space > sbspace(sb))
		return (0);
	/* the whole address must fit in a single small mbuf */
	if (asa->sa_len > MLEN)
		return (0);
	MGET(m, M_DONTWAIT, MT_SONAME);
	if (m == 0)
		return (0);
	m->m_len = asa->sa_len;
	bcopy((caddr_t)asa, mtod(m, caddr_t), asa->sa_len);
	if (n)
		n->m_next = m0;		/* concatenate data to control */
	else
		control = m0;
	m->m_next = control;
	/* charge the whole name+control+data chain to the sockbuf */
	for (n = m; n; n = n->m_next)
		sballoc(sb, n);
	/* link the new record at the tail of the record list */
	if (n = sb->sb_mb) {
		while (n->m_nextpkt)
			n = n->m_nextpkt;
		n->m_nextpkt = m;
	} else
		sb->sb_mb = m;
	return (1);
}
644 | ||
/*
 * Append a record of control (ancillary) data `control' followed by
 * data `m0' to the receive queue `sb'.  `control' must be non-null.
 * Returns 0 if there is no room in the sockbuf, 1 on success.
 */
int
sbappendcontrol(sb, m0, control)
	struct sockbuf *sb;
	struct mbuf *control, *m0;
{
	register struct mbuf *m, *n;
	int space = 0;

	if (control == 0)
		panic("sbappendcontrol");
	for (m = control; ; m = m->m_next) {
		space += m->m_len;
		if (m->m_next == 0)
			break;
	}
	n = m;			/* save pointer to last control buffer */
	for (m = m0; m; m = m->m_next)
		space += m->m_len;
	if (space > sbspace(sb))
		return (0);
	n->m_next = m0;			/* concatenate data to control */
	/* charge the combined chain to the sockbuf */
	for (m = control; m; m = m->m_next)
		sballoc(sb, m);
	/* link the new record at the tail of the record list */
	if (n = sb->sb_mb) {
		while (n->m_nextpkt)
			n = n->m_nextpkt;
		n->m_nextpkt = control;
	} else
		sb->sb_mb = control;
	return (1);
}
676 | ||
677 | /* | |
678 | * Compress mbuf chain m into the socket | |
679 | * buffer sb following mbuf n. If n | |
680 | * is null, the buffer is presumed empty. | |
681 | */ | |
/*
 * Compress mbuf chain `m' into sockbuf `sb' following mbuf `n'.
 * If `n' is null, the buffer is presumed empty.  Empty mbufs are
 * freed, and small mbufs are copied into their predecessor's internal
 * storage when type and space permit.  Any M_EOR flag seen on the
 * chain ends up on the final mbuf stored.
 */
void
sbcompress(sb, m, n)
	register struct sockbuf *sb;
	register struct mbuf *m, *n;
{
	register int eor = 0;
	register struct mbuf *o;

	while (m) {
		eor |= m->m_flags & M_EOR;
		/*
		 * Drop empty mbufs -- unless this one carried the only
		 * record-end mark and there is no same-type mbuf (next
		 * in chain, or predecessor n) to move it onto.
		 */
		if (m->m_len == 0 &&
		    (eor == 0 ||
		     (((o = m->m_next) || (o = n)) &&
		      o->m_type == m->m_type))) {
			m = m_free(m);
			continue;
		}
		/*
		 * Coalesce: copy m's data into n's internal buffer when
		 * n has no external storage, isn't a record end, has
		 * room, and the types match.
		 */
		if (n && (n->m_flags & (M_EXT | M_EOR)) == 0 &&
		    (n->m_data + n->m_len + m->m_len) < &n->m_dat[MLEN] &&
		    n->m_type == m->m_type) {
			bcopy(mtod(m, caddr_t), mtod(n, caddr_t) + n->m_len,
			    (unsigned)m->m_len);
			n->m_len += m->m_len;
			sb->sb_cc += m->m_len;
			m = m_free(m);
			continue;
		}
		/* keep m as-is: link it after n (or as the first mbuf) */
		if (n)
			n->m_next = m;
		else
			sb->sb_mb = m;
		sballoc(sb, m);
		n = m;
		m->m_flags &= ~M_EOR;
		m = m->m_next;
		n->m_next = 0;
	}
	/* restore the record-end mark on the last mbuf stored */
	if (eor) {
		if (n)
			n->m_flags |= eor;
		else
			printf("semi-panic: sbcompress\n");
	}
}
726 | ||
727 | /* | |
728 | * Free all mbufs in a sockbuf. | |
729 | * Check that all resources are reclaimed. | |
730 | */ | |
/*
 * Free all mbufs in sockbuf `sb' and check that all accounting
 * (sb_cc, sb_mbcnt, sb_mb) is reclaimed.  Must not be called with
 * the sockbuf locked.
 * NOTE(review): when sb_cc is 0 but sb_mbcnt is not (zero-length
 * records), progress relies on sbdrop(sb, 0) still reclaiming
 * leading zero-length mbufs in its trailing loop -- confirm.
 */
void
sbflush(sb)
	register struct sockbuf *sb;
{

	if (sb->sb_flags & SB_LOCK)
		panic("sbflush");
	while (sb->sb_mbcnt)
		sbdrop(sb, (int)sb->sb_cc);
	if (sb->sb_cc || sb->sb_mb)
		panic("sbflush 2");
}
743 | ||
744 | /* | |
745 | * Drop data from (the front of) a sockbuf. | |
746 | */ | |
/*
 * Drop `len' bytes of data from the front of sockbuf `sb', freeing
 * fully-consumed mbufs and crossing record boundaries as needed.
 * Panics if asked to drop more data than the buffer holds.
 */
void
sbdrop(sb, len)
	register struct sockbuf *sb;
	register int len;
{
	register struct mbuf *m, *mn;
	struct mbuf *next;

	/* remember the second record so links can be restitched below */
	next = (m = sb->sb_mb) ? m->m_nextpkt : 0;
	while (len > 0) {
		if (m == 0) {
			/* current record exhausted: advance to the next */
			if (next == 0)
				panic("sbdrop");
			m = next;
			next = m->m_nextpkt;
			continue;
		}
		if (m->m_len > len) {
			/* partial mbuf: trim from the front and stop */
			m->m_len -= len;
			m->m_data += len;
			sb->sb_cc -= len;
			break;
		}
		len -= m->m_len;
		sbfree(sb, m);
		MFREE(m, mn);
		m = mn;
	}
	/* also reap any zero-length mbufs now at the front */
	while (m && m->m_len == 0) {
		sbfree(sb, m);
		MFREE(m, mn);
		m = mn;
	}
	/* reattach what remains as the first record */
	if (m) {
		sb->sb_mb = m;
		m->m_nextpkt = next;
	} else
		sb->sb_mb = next;
}
786 | ||
787 | /* | |
788 | * Drop a record off the front of a sockbuf | |
789 | * and move the next record to the front. | |
790 | */ | |
4c45483e | 791 | void |
15637ed4 RG |
792 | sbdroprecord(sb) |
793 | register struct sockbuf *sb; | |
794 | { | |
795 | register struct mbuf *m, *mn; | |
796 | ||
797 | m = sb->sb_mb; | |
798 | if (m) { | |
799 | sb->sb_mb = m->m_nextpkt; | |
800 | do { | |
801 | sbfree(sb, m); | |
802 | MFREE(m, mn); | |
803 | } while (m = mn); | |
804 | } | |
805 | } |