ec_rxstart doesn't exist
[unix-history] / usr / src / sys / kern / uipc_socket.c
... / ...
CommitLineData
1/*
2 * Copyright (c) 1982, 1986, 1988, 1990 Regents of the University of California.
3 * All rights reserved.
4 *
5 * %sccs.include.redist.c%
6 *
7 * @(#)uipc_socket.c 7.31 (Berkeley) %G%
8 */
9
10#include "param.h"
11#include "proc.h"
12#include "file.h"
13#include "malloc.h"
14#include "mbuf.h"
15#include "domain.h"
16#include "kernel.h"
17#include "protosw.h"
18#include "socket.h"
19#include "socketvar.h"
20#include "resourcevar.h"
21
22/*
23 * Socket operation routines.
24 * These routines are called by the routines in
25 * sys_socket.c or from a system process, and
26 * implement the semantics of socket operations by
27 * switching out to the protocol specific routines.
28 */
29/*ARGSUSED*/
30socreate(dom, aso, type, proto)
31 struct socket **aso;
32 register int type;
33 int proto;
34{
35 struct proc *p = curproc; /* XXX */
36 register struct protosw *prp;
37 register struct socket *so;
38 register int error;
39
40 if (proto)
41 prp = pffindproto(dom, proto, type);
42 else
43 prp = pffindtype(dom, type);
44 if (prp == 0)
45 return (EPROTONOSUPPORT);
46 if (prp->pr_type != type)
47 return (EPROTOTYPE);
48 MALLOC(so, struct socket *, sizeof(*so), M_SOCKET, M_WAIT);
49 bzero((caddr_t)so, sizeof(*so));
50 so->so_type = type;
51 if (p->p_ucred->cr_uid == 0)
52 so->so_state = SS_PRIV;
53 so->so_proto = prp;
54 error =
55 (*prp->pr_usrreq)(so, PRU_ATTACH,
56 (struct mbuf *)0, (struct mbuf *)proto, (struct mbuf *)0);
57 if (error) {
58 so->so_state |= SS_NOFDREF;
59 sofree(so);
60 return (error);
61 }
62 *aso = so;
63 return (0);
64}
65
66sobind(so, nam)
67 struct socket *so;
68 struct mbuf *nam;
69{
70 int s = splnet();
71 int error;
72
73 error =
74 (*so->so_proto->pr_usrreq)(so, PRU_BIND,
75 (struct mbuf *)0, nam, (struct mbuf *)0);
76 splx(s);
77 return (error);
78}
79
80solisten(so, backlog)
81 register struct socket *so;
82 int backlog;
83{
84 int s = splnet(), error;
85
86 error =
87 (*so->so_proto->pr_usrreq)(so, PRU_LISTEN,
88 (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
89 if (error) {
90 splx(s);
91 return (error);
92 }
93 if (so->so_q == 0)
94 so->so_options |= SO_ACCEPTCONN;
95 if (backlog < 0)
96 backlog = 0;
97 so->so_qlimit = min(backlog, SOMAXCONN);
98 splx(s);
99 return (0);
100}
101
102sofree(so)
103 register struct socket *so;
104{
105
106 if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0)
107 return;
108 if (so->so_head) {
109 if (!soqremque(so, 0) && !soqremque(so, 1))
110 panic("sofree dq");
111 so->so_head = 0;
112 }
113 sbrelease(&so->so_snd);
114 sorflush(so);
115 FREE(so, M_SOCKET);
116}
117
118/*
119 * Close a socket on last file table reference removal.
120 * Initiate disconnect if connected.
121 * Free socket when disconnect complete.
122 */
soclose(so)
	register struct socket *so;
{
	int s = splnet();		/* conservative */
	int error = 0;

	/* A listening socket first aborts all queued connections. */
	if (so->so_options & SO_ACCEPTCONN) {
		while (so->so_q0)	/* incomplete connections */
			(void) soabort(so->so_q0);
		while (so->so_q)	/* connections awaiting accept */
			(void) soabort(so->so_q);
	}
	if (so->so_pcb == 0)
		goto discard;
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			/*
			 * SO_LINGER: wait (up to so_linger) for the
			 * disconnect to complete, unless the socket is
			 * non-blocking, in which case return at once.
			 */
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (so->so_state & SS_NBIO))
				goto drop;
			while (so->so_state & SS_ISCONNECTED)
				if (error = tsleep((caddr_t)&so->so_timeo,
				    PSOCK | PCATCH, netcls, so->so_linger))
					break;
		}
	}
drop:
	if (so->so_pcb) {
		/* Detach the protocol; keep the earlier error if any. */
		int error2 =
		    (*so->so_proto->pr_usrreq)(so, PRU_DETACH,
			(struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
		if (error == 0)
			error = error2;
	}
discard:
	if (so->so_state & SS_NOFDREF)
		panic("soclose: NOFDREF");
	so->so_state |= SS_NOFDREF;	/* last file reference is gone */
	sofree(so);
	splx(s);
	return (error);
}
169
170/*
171 * Must be called at splnet...
172 */
173soabort(so)
174 struct socket *so;
175{
176
177 return (
178 (*so->so_proto->pr_usrreq)(so, PRU_ABORT,
179 (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0));
180}
181
182soaccept(so, nam)
183 register struct socket *so;
184 struct mbuf *nam;
185{
186 int s = splnet();
187 int error;
188
189 if ((so->so_state & SS_NOFDREF) == 0)
190 panic("soaccept: !NOFDREF");
191 so->so_state &= ~SS_NOFDREF;
192 error = (*so->so_proto->pr_usrreq)(so, PRU_ACCEPT,
193 (struct mbuf *)0, nam, (struct mbuf *)0);
194 splx(s);
195 return (error);
196}
197
198soconnect(so, nam)
199 register struct socket *so;
200 struct mbuf *nam;
201{
202 int s;
203 int error;
204
205 if (so->so_options & SO_ACCEPTCONN)
206 return (EOPNOTSUPP);
207 s = splnet();
208 /*
209 * If protocol is connection-based, can only connect once.
210 * Otherwise, if connected, try to disconnect first.
211 * This allows user to disconnect by connecting to, e.g.,
212 * a null address.
213 */
214 if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
215 ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
216 (error = sodisconnect(so))))
217 error = EISCONN;
218 else
219 error = (*so->so_proto->pr_usrreq)(so, PRU_CONNECT,
220 (struct mbuf *)0, nam, (struct mbuf *)0);
221 splx(s);
222 return (error);
223}
224
225soconnect2(so1, so2)
226 register struct socket *so1;
227 struct socket *so2;
228{
229 int s = splnet();
230 int error;
231
232 error = (*so1->so_proto->pr_usrreq)(so1, PRU_CONNECT2,
233 (struct mbuf *)0, (struct mbuf *)so2, (struct mbuf *)0);
234 splx(s);
235 return (error);
236}
237
238sodisconnect(so)
239 register struct socket *so;
240{
241 int s = splnet();
242 int error;
243
244 if ((so->so_state & SS_ISCONNECTED) == 0) {
245 error = ENOTCONN;
246 goto bad;
247 }
248 if (so->so_state & SS_ISDISCONNECTING) {
249 error = EALREADY;
250 goto bad;
251 }
252 error = (*so->so_proto->pr_usrreq)(so, PRU_DISCONNECT,
253 (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
254bad:
255 splx(s);
256 return (error);
257}
258
259#define SBLOCKWAIT(f) (((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
260/*
261 * Send on a socket.
262 * If send must go all at once and message is larger than
263 * send buffering, then hard error.
264 * Lock against other senders.
265 * If must go all at once and not enough room now, then
266 * inform user that this would block and do nothing.
267 * Otherwise, if nonblocking, send as much as possible.
268 * The data to be sent is described by "uio" if nonzero,
269 * otherwise by the mbuf chain "top" (which must be null
270 * if uio is not). Data provided in mbuf chain must be small
271 * enough to send all at once.
272 *
273 * Returns nonzero on error, timeout or signal; callers
274 * must check for short counts if EINTR/ERESTART are returned.
275 * Data and control buffers are freed on return.
276 */
sosend(so, addr, uio, top, control, flags)
	register struct socket *so;
	struct mbuf *addr;
	struct uio *uio;
	struct mbuf *top;
	struct mbuf *control;
	int flags;
{
	struct proc *p = curproc;		/* XXX */
	struct mbuf **mp;
	register struct mbuf *m;
	register long space, len, resid;
	int clen = 0, error, s, dontroute, mlen;
	int atomic = sosendallatonce(so) || top;

	/* Bytes still to send: from uio, or the prepackaged chain. */
	if (uio)
		resid = uio->uio_resid;
	else
		resid = top->m_pkthdr.len;
	/*
	 * MSG_DONTROUTE takes effect only for atomic protocols when
	 * SO_DONTROUTE is not already set; the option is toggled
	 * around each protocol send below.
	 */
	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	p->p_stats->p_ru.ru_msgsnd++;
	if (control)
		clen = control->m_len;
	/* Escape the splnet region with the given error. */
#define	snderr(errno)	{ error = errno; splx(s); goto release; }

restart:
	if (error = sblock(&so->so_snd, SBLOCKWAIT(flags)))
		goto out;
	do {
		s = splnet();
		if (so->so_state & SS_CANTSENDMORE)
			snderr(EPIPE);
		if (so->so_error)
			snderr(so->so_error);
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
				/*
				 * A zero-length send carrying only control
				 * data is allowed while confirming.
				 */
				if ((so->so_state & SS_ISCONFIRMING) == 0 &&
				    !(resid == 0 && clen != 0))
					snderr(ENOTCONN);
			} else if (addr == 0)
				snderr(EDESTADDRREQ);
		}
		space = sbspace(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;	/* let OOB exceed the limit a bit */
		if (atomic && resid > so->so_snd.sb_hiwat ||
		    clen > so->so_snd.sb_hiwat)
			snderr(EMSGSIZE);
		if (space < resid + clen && uio &&
		    (atomic || space < so->so_snd.sb_lowat || space < clen)) {
			if (so->so_state & SS_NBIO)
				snderr(EWOULDBLOCK);
			/* Wait for space, then retry from the top. */
			sbunlock(&so->so_snd);
			error = sbwait(&so->so_snd);
			splx(s);
			if (error)
				goto out;
			goto restart;
		}
		splx(s);
		mp = &top;
		space -= clen;
		do {
		    if (uio == NULL) {
			/*
			 * Data is prepackaged in "top".
			 */
			resid = 0;
			if (flags & MSG_EOR)
				top->m_flags |= M_EOR;
		    } else do {
			/* Fill the chain one mbuf or cluster at a time. */
			if (top == 0) {
				MGETHDR(m, M_WAIT, MT_DATA);
				mlen = MHLEN;
				m->m_pkthdr.len = 0;
				m->m_pkthdr.rcvif = (struct ifnet *)0;
			} else {
				MGET(m, M_WAIT, MT_DATA);
				mlen = MLEN;
			}
			if (resid >= MINCLSIZE && space >= MCLBYTES) {
				MCLGET(m, M_WAIT);
				if ((m->m_flags & M_EXT) == 0)
					goto nopages;
				mlen = MCLBYTES;
#ifdef MAPPED_MBUFS
				len = min(MCLBYTES, resid);
#else
				if (top == 0) {
					/* leave room for protocol headers */
					len = min(MCLBYTES - max_hdr, resid);
					m->m_data += max_hdr;
				} else
					len = min(MCLBYTES, resid);
#endif
				space -= MCLBYTES;
			} else {
nopages:
				len = min(min(mlen, resid), space);
				space -= len;
				/*
				 * For datagram protocols, leave room
				 * for protocol headers in first mbuf.
				 */
				if (atomic && top == 0 && len < mlen)
					MH_ALIGN(m, len);
			}
			error = uiomove(mtod(m, caddr_t), (int)len, uio);
			resid = uio->uio_resid;
			m->m_len = len;
			*mp = m;	/* links first mbuf in as "top" */
			top->m_pkthdr.len += len;
			if (error)
				goto release;
			mp = &m->m_next;
			if (resid <= 0) {
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
				break;
			}
		    } while (space > 0 && atomic);
		    if (dontroute)
			    so->so_options |= SO_DONTROUTE;
		    s = splnet();				/* XXX */
		    error = (*so->so_proto->pr_usrreq)(so,
			(flags & MSG_OOB) ? PRU_SENDOOB : PRU_SEND,
			top, addr, control);
		    splx(s);
		    if (dontroute)
			    so->so_options &= ~SO_DONTROUTE;
		    clen = 0;
		    /* The protocol now owns top and control. */
		    control = 0;
		    top = 0;
		    mp = &top;
		    if (error)
			goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	sbunlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return (error);
}
426
427/*
428 * Implement receive operations on a socket.
429 * We depend on the way that records are added to the sockbuf
430 * by sbappend*. In particular, each record (mbufs linked through m_next)
431 * must begin with an address if the protocol so specifies,
432 * followed by an optional mbuf or mbufs containing ancillary data,
433 * and then zero or more mbufs of data.
434 * In order to avoid blocking network interrupts for the entire time here,
435 * we splx() while doing the actual copy to user space.
436 * Although the sockbuf is locked, new data may still be appended,
437 * and thus we must maintain consistency of the sockbuf during that time.
438 *
439 * The caller may receive the data as a single mbuf chain by supplying
440 * an mbuf **mp0 for use in returning the chain. The uio is then used
441 * only for the count in uio_resid.
442 */
soreceive(so, paddr, uio, mp0, controlp, flagsp)
	register struct socket *so;
	struct mbuf **paddr;
	struct uio *uio;
	struct mbuf **mp0;
	struct mbuf **controlp;
	int *flagsp;
{
	struct proc *p = curproc;		/* XXX */
	register struct mbuf *m, **mp;
	register int flags, len, error, s, offset;
	struct protosw *pr = so->so_proto;
	struct mbuf *nextrecord;
	int moff, type;
	/*
	 * NOTE(review): this revision appears to be missing lines.
	 * "record_eor" is used below but never declared; "goto
	 * dontblock" targets a label that does not exist; and the
	 * blocking test below has an unbalanced brace and a "break"
	 * with no enclosing loop.  Compare against the completed
	 * 4.4BSD soreceive() before relying on this code.
	 */

	mp = mp0;
	if (paddr)
		*paddr = 0;
	if (controlp)
		*controlp = 0;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		/* Out-of-band data is fetched directly from the protocol. */
		m = m_get(M_WAIT, MT_DATA);
		error = (*pr->pr_usrreq)(so, PRU_RCVOOB,
		    m, (struct mbuf *)(flags & MSG_PEEK), (struct mbuf *)0);
		if (error)
			goto bad;
		do {
			error = uiomove(mtod(m, caddr_t),
			    (int) min(uio->uio_resid, m->m_len), uio);
			m = m_free(m);
		} while (uio->uio_resid && error == 0 && m);
bad:
		if (m)
			m_freem(m);
		return (error);
	}
	if (mp)
		*mp = (struct mbuf *)0;
	/* Let a confirming protocol complete its handshake first. */
	if (so->so_state & SS_ISCONFIRMING && uio->uio_resid)
		(*pr->pr_usrreq)(so, PRU_RCVD, (struct mbuf *)0,
		    (struct mbuf *)0, (struct mbuf *)0);

restart:
	if (error = sblock(&so->so_rcv, SBLOCKWAIT(flags)))
		return (error);
	s = splnet();

	m = so->so_rcv.sb_mb;
	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat).
	 *   3. MSG_DONTWAIT is not set
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == 0 || ((flags & MSG_DONTWAIT) == 0 &&
	    so->so_rcv.sb_cc < uio->uio_resid) &&
	    (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
	    ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)))
		/*
		 * NOTE(review): an opening "{" (and later a "dontblock:"
		 * label) appear to be missing here; the "break" below has
		 * no enclosing loop in this revision.
		 */
		if (m && (m->m_nextpkt || (m->m_flags & M_EOR) ||
		    m->m_type == MT_OOBDATA || m->m_type == MT_CONTROL))
			break;
#ifdef DIAGNOSTIC
		if (m == 0 && so->so_rcv.sb_cc)
			panic("receive 1");
#endif
		if (so->so_error) {
			if (m)
				goto dontblock;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;	/* consume the error */
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			else
				goto release;
		}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (uio->uio_resid == 0)
			goto release;
		if ((so->so_state & SS_NBIO) || (flags & MSG_DONTWAIT)) {
			error = EWOULDBLOCK;
			goto release;
		}
		/* Release the lock while sleeping for data. */
		sbunlock(&so->so_rcv);
		error = sbwait(&so->so_rcv);
		splx(s);
		if (error)
			return (error);
		goto restart;
	}
	p->p_stats->p_ru.ru_msgrcv++;
	nextrecord = m->m_nextpkt;
	record_eor = m->m_flags & M_EOR;	/* NOTE(review): undeclared */
	if (pr->pr_flags & PR_ADDR) {
		/* First mbuf of the record carries the sender's address. */
#ifdef DIAGNOSTIC
		if (m->m_type != MT_SONAME)
			panic("receive 1a");
#endif
		if (flags & MSG_PEEK) {
			if (paddr)
				*paddr = m_copy(m, 0, m->m_len);
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			if (paddr) {
				*paddr = m;	/* hand the mbuf to the caller */
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = 0;
				m = so->so_rcv.sb_mb;
			} else {
				MFREE(m, so->so_rcv.sb_mb);
				m = so->so_rcv.sb_mb;
			}
		}
	}
	/* Then any ancillary-data (control) mbufs. */
	while (m && m->m_type == MT_CONTROL && error == 0) {
		if (flags & MSG_PEEK) {
			if (controlp)
				*controlp = m_copy(m, 0, m->m_len);
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			if (controlp) {
				/* Passed access rights must be internalized. */
				if (pr->pr_domain->dom_externalize &&
				    mtod(m, struct cmsghdr *)->cmsg_type ==
				    SCM_RIGHTS)
				   error = (*pr->pr_domain->dom_externalize)(m);
				*controlp = m;
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = 0;
				m = so->so_rcv.sb_mb;
			} else {
				MFREE(m, so->so_rcv.sb_mb);
				m = so->so_rcv.sb_mb;
			}
		}
		if (controlp)
			controlp = &(*controlp)->m_next;
	}
	if (m) {
		if ((flags & MSG_PEEK) == 0)
			m->m_nextpkt = nextrecord;
		/* Remember the data type so OOB and normal data don't mix. */
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
	}
	moff = 0;
	offset = 0;
	while (m && uio->uio_resid > 0 && error == 0) {
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA)
			break;
#ifdef DIAGNOSTIC
		else if (m->m_type != MT_DATA && m->m_type != MT_HEADER)
			panic("receive 3");
#endif
		so->so_state &= ~SS_RCVATMARK;
		len = uio->uio_resid;
		/* Do not read past the out-of-band mark. */
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;
		/*
		 * If mp is set, just pass back the mbufs.
		 * Otherwise copy them out via the uio, then free.
		 * Sockbuf must be consistent here (points to current mbuf,
		 * it points to next record) when we drop priority;
		 * we must note any additions to the sockbuf when we
		 * block interrupts again.
		 */
		if (mp == 0) {
			splx(s);
			error = uiomove(mtod(m, caddr_t) + moff, (int)len, uio);
			s = splnet();
		} else
			uio->uio_resid -= len;
		if (len == m->m_len - moff) {
			/* Consumed this whole mbuf. */
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				nextrecord = m->m_nextpkt;
				sbfree(&so->so_rcv, m);
				if (mp) {
					*mp = m;
					mp = &m->m_next;
					so->so_rcv.sb_mb = m = m->m_next;
					*mp = (struct mbuf *)0;
				} else {
					MFREE(m, so->so_rcv.sb_mb);
					m = so->so_rcv.sb_mb;
				}
				if (m)
					m->m_nextpkt = nextrecord;
			}
		} else {
			/* Partial mbuf: advance within it. */
			if (flags & MSG_PEEK)
				moff += len;
			else {
				if (mp)
					*mp = m_copym(m, 0, len, M_WAIT);
				m->m_data += len;
				m->m_len -= len;
				so->so_rcv.sb_cc -= len;
			}
		}
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					/* Stop exactly at the OOB mark. */
					so->so_state |= SS_RCVATMARK;
					break;
				}
			} else
				offset += len;
		}
		/* NOTE(review): flags |= record_eor ors in the M_EOR bit,
		 * not MSG_EOR; confirm the flag values agree. */
		if (m == 0 && record_eor) {
			flags |= record_eor;
			break;
		}
		/*
		 * If the MSG_WAITALL flag is set (for non-atomic socket),
		 * we must not quit until "uio->uio_resid == 0" or an error
		 * termination.  If a signal/timeout occurs, return
		 * with a short count but without error.
		 * Keep sockbuf locked against other readers.
		 */
		while (flags & MSG_WAITALL && m == 0 && uio->uio_resid > 0 &&
		    !(flags & MSG_OOB) && !sosendallatonce(so)) {
			if (so->so_error || so->so_state & SS_CANTRCVMORE)
				break;
			error = sbwait(&so->so_rcv);
			if (error) {
				/* Short count, deliberately without error. */
				sbunlock(&so->so_rcv);
				splx(s);
				return (0);
			}
			if (m = so->so_rcv.sb_mb) {
				nextrecord = m->m_nextpkt;
				record_eor |= m->m_flags & M_EOR;
			}
		}
	}
	if ((flags & MSG_PEEK) == 0) {
		if (m == 0)
			so->so_rcv.sb_mb = nextrecord;
		else if (pr->pr_flags & PR_ATOMIC) {
			/* Atomic protocol: drop the rest of the record. */
			flags |= MSG_TRUNC;
			(void) sbdroprecord(&so->so_rcv);
		}
		/* NOTE(review): pr_usrreq is passed six arguments here but
		 * five everywhere else in this file -- confirm intended. */
		if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
			(*pr->pr_usrreq)(so, PRU_RCVD, (struct mbuf *)0,
			    (struct mbuf *)flags, (struct mbuf *)0,
			    (struct mbuf *)0);
	}
	if (flagsp)
		*flagsp |= flags;
release:
	sbunlock(&so->so_rcv);
	splx(s);
	return (error);
}
723
724soshutdown(so, how)
725 register struct socket *so;
726 register int how;
727{
728 register struct protosw *pr = so->so_proto;
729
730 how++;
731 if (how & FREAD)
732 sorflush(so);
733 if (how & FWRITE)
734 return ((*pr->pr_usrreq)(so, PRU_SHUTDOWN,
735 (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0));
736 return (0);
737}
738
/*
 * Flush and release a socket's receive buffer.  The buffer is
 * snapshotted and then cleared at high priority so that data
 * arriving during the release is not lost or doubly freed; any
 * access rights in the flushed data are disposed of by the domain.
 */
sorflush(so)
	register struct socket *so;
{
	register struct sockbuf *sb = &so->so_rcv;
	register struct protosw *pr = so->so_proto;
	register int s;
	struct sockbuf asb;

	sb->sb_flags |= SB_NOINTR;	/* lock must not fail on signal */
	(void) sblock(sb, M_WAITOK);
	s = splimp();
	socantrcvmore(so);
	sbunlock(sb);
	asb = *sb;			/* snapshot, then reset in place */
	bzero((caddr_t)sb, sizeof (*sb));
	splx(s);
	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose)
		(*pr->pr_domain->dom_dispose)(asb.sb_mb);
	sbrelease(&asb);
}
759
/*
 * Set a socket option.  Non-socket-level options are passed through
 * to the protocol's ctloutput routine; socket-level options are
 * handled here and then also offered to the protocol.  The option
 * mbuf m0 is consumed.
 */
sosetopt(so, level, optname, m0)
	register struct socket *so;
	int level, optname;
	struct mbuf *m0;
{
	int error = 0;
	register struct mbuf *m = m0;

	if (level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput)
			return ((*so->so_proto->pr_ctloutput)
				  (PRCO_SETOPT, so, level, optname, &m0));
		error = ENOPROTOOPT;	/* falls through to free m below */
	} else {
		switch (optname) {

		case SO_LINGER:
			if (m == NULL || m->m_len != sizeof (struct linger)) {
				error = EINVAL;
				goto bad;
			}
			so->so_linger = mtod(m, struct linger *)->l_linger;
			/* fall thru: l_onoff toggles the option bit below */

		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_DONTROUTE:
		case SO_USELOOPBACK:
		case SO_BROADCAST:
		case SO_REUSEADDR:
		case SO_OOBINLINE:
			/* Boolean options: set or clear the option bit. */
			if (m == NULL || m->m_len < sizeof (int)) {
				error = EINVAL;
				goto bad;
			}
			if (*mtod(m, int *))
				so->so_options |= optname;
			else
				so->so_options &= ~optname;
			break;

		case SO_SNDBUF:
		case SO_RCVBUF:
		case SO_SNDLOWAT:
		case SO_RCVLOWAT:
			if (m == NULL || m->m_len < sizeof (int)) {
				error = EINVAL;
				goto bad;
			}
			switch (optname) {

			case SO_SNDBUF:
			case SO_RCVBUF:
				if (sbreserve(optname == SO_SNDBUF ?
				    &so->so_snd : &so->so_rcv,
				    (u_long) *mtod(m, int *)) == 0) {
					error = ENOBUFS;
					goto bad;
				}
				break;

			case SO_SNDLOWAT:
				so->so_snd.sb_lowat = *mtod(m, int *);
				break;
			case SO_RCVLOWAT:
				so->so_rcv.sb_lowat = *mtod(m, int *);
				break;
			}
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
		    {
			struct timeval *tv;
			short val;	/* sb_timeo is held in ticks */

			if (m == NULL || m->m_len < sizeof (*tv)) {
				error = EINVAL;
				goto bad;
			}
			tv = mtod(m, struct timeval *);
			/* Guard the tick conversion against short overflow;
			 * NOTE(review): the "- hz" slack looks odd, confirm. */
			if (tv->tv_sec > SHRT_MAX / hz - hz) {
				error = EDOM;
				goto bad;
			}
			val = tv->tv_sec * hz + tv->tv_usec / tick;

			switch (optname) {

			case SO_SNDTIMEO:
				so->so_snd.sb_timeo = val;
				break;
			case SO_RCVTIMEO:
				so->so_rcv.sb_timeo = val;
				break;
			}
			break;
		    }

		default:
			error = ENOPROTOOPT;
			break;
		}
		/* NOTE(review): m is cleared unconditionally here, so on
		 * success with no pr_ctloutput the option mbuf appears to
		 * leak -- confirm against the completed 4.4BSD version. */
		m = 0;
		if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput)
			(void) ((*so->so_proto->pr_ctloutput)
				  (PRCO_SETOPT, so, level, optname, &m0));
	}
bad:
	if (m)
		(void) m_free(m);
	return (error);
}
873
/*
 * Get a socket option.  Non-socket-level options are passed through
 * to the protocol; socket-level options are answered here.  On
 * success a freshly allocated mbuf holding the value is returned
 * through mp; the caller frees it.
 */
sogetopt(so, level, optname, mp)
	register struct socket *so;
	int level, optname;
	struct mbuf **mp;
{
	register struct mbuf *m;

	if (level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return ((*so->so_proto->pr_ctloutput)
				  (PRCO_GETOPT, so, level, optname, mp));
		} else
			return (ENOPROTOOPT);
	} else {
		m = m_get(M_WAIT, MT_SOOPTS);
		m->m_len = sizeof (int);	/* default; overridden below */

		switch (optname) {

		case SO_LINGER:
			m->m_len = sizeof (struct linger);
			mtod(m, struct linger *)->l_onoff =
				so->so_options & SO_LINGER;
			mtod(m, struct linger *)->l_linger = so->so_linger;
			break;

		case SO_USELOOPBACK:
		case SO_DONTROUTE:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_REUSEADDR:
		case SO_BROADCAST:
		case SO_OOBINLINE:
			/* Boolean options: return the raw option bit. */
			*mtod(m, int *) = so->so_options & optname;
			break;

		case SO_TYPE:
			*mtod(m, int *) = so->so_type;
			break;

		case SO_ERROR:
			/* Reading the error clears it. */
			*mtod(m, int *) = so->so_error;
			so->so_error = 0;
			break;

		case SO_SNDBUF:
			*mtod(m, int *) = so->so_snd.sb_hiwat;
			break;

		case SO_RCVBUF:
			*mtod(m, int *) = so->so_rcv.sb_hiwat;
			break;

		case SO_SNDLOWAT:
			*mtod(m, int *) = so->so_snd.sb_lowat;
			break;

		case SO_RCVLOWAT:
			*mtod(m, int *) = so->so_rcv.sb_lowat;
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
		    {
			/* Convert the tick count back to a timeval. */
			int val = (optname == SO_SNDTIMEO ?
			     so->so_snd.sb_timeo : so->so_rcv.sb_timeo);

			m->m_len = sizeof(struct timeval);
			mtod(m, struct timeval *)->tv_sec = val / hz;
			mtod(m, struct timeval *)->tv_usec =
			    (val % hz) / tick;
			break;
		    }

		default:
			(void)m_free(m);
			return (ENOPROTOOPT);
		}
		*mp = m;
		return (0);
	}
}
956
957sohasoutofband(so)
958 register struct socket *so;
959{
960 struct proc *p;
961
962 if (so->so_pgid < 0)
963 gsignal(-so->so_pgid, SIGURG);
964 else if (so->so_pgid > 0 && (p = pfind(so->so_pgid)) != 0)
965 psignal(p, SIGURG);
966 if (so->so_rcv.sb_sel) {
967 selwakeup(so->so_rcv.sb_sel, so->so_rcv.sb_flags & SB_COLL);
968 so->so_rcv.sb_sel = 0;
969 so->so_rcv.sb_flags &= ~SB_COLL;
970 }
971}