put vax headers in their place
[unix-history] / usr / src / sys / kern / uipc_socket2.c
... / ...
CommitLineData
1/* uipc_socket2.c 4.28 82/10/09 */
2
3#include "../h/param.h"
4#include "../h/systm.h"
5#include "../h/dir.h"
6#include "../h/user.h"
7#include "../h/proc.h"
8#include "../h/file.h"
9#include "../h/inode.h"
10#include "../h/buf.h"
11#include "../h/mbuf.h"
12#include "../h/protosw.h"
13#include "../h/socket.h"
14#include "../h/socketvar.h"
15
16/*
17 * Primitive routines for operating on sockets and socket buffers
18 */
19
20/*
21 * Procedures to manipulate state flags of socket
22 * and do appropriate wakeups. Normal sequence from the
23 * active (originating) side is that soisconnecting() is
24 * called during processing of connect() call,
25 * resulting in an eventual call to soisconnected() if/when the
26 * connection is established. When the connection is torn down
27 * soisdisconnecting() is called during processing of disconnect() call,
28 * and soisdisconnected() is called when the connection to the peer
29 * is totally severed. The semantics of these routines are such that
30 * connectionless protocols can call soisconnected() and soisdisconnected()
31 * only, bypassing the in-progress calls when setting up a ``connection''
32 * takes no time.
33 *
34 * From the passive side, a socket is created with SO_ACCEPTCONN
35 * creating two queues of sockets: so_q0 for connections in progress
36 * and so_q for connections already made and awaiting user acceptance.
37 * As a protocol is preparing incoming connections, it creates a socket
38 * structure queued on so_q0 by calling sonewconn(). When the connection
39 * is established, soisconnected() is called, and transfers the
40 * socket structure to so_q, making it available to accept().
41 *
42 * If a SO_ACCEPTCONN socket is closed with sockets on either
43 * so_q0 or so_q, these sockets are dropped.
44 *
45 * If and when higher level protocols are implemented in
46 * the kernel, the wakeups done here will sometimes
47 * be implemented as software-interrupt process scheduling.
48 */
49
50soisconnecting(so)
51 struct socket *so;
52{
53
54 so->so_state &= ~(SS_ISCONNECTED|SS_ISDISCONNECTING);
55 so->so_state |= SS_ISCONNECTING;
56 wakeup((caddr_t)&so->so_timeo);
57}
58
59soisconnected(so)
60 struct socket *so;
61{
62 register struct socket *head = so->so_head;
63
64 if (head) {
65 if (soqremque(so, 0) == 0)
66 panic("soisconnected");
67 soqinsque(head, so, 1);
68 wakeup((caddr_t)&head->so_timeo);
69 }
70 so->so_state &= ~(SS_ISCONNECTING|SS_ISDISCONNECTING);
71 so->so_state |= SS_ISCONNECTED;
72 wakeup((caddr_t)&so->so_timeo);
73 sorwakeup(so);
74 sowwakeup(so);
75}
76
77soisdisconnecting(so)
78 struct socket *so;
79{
80
81 so->so_state &= ~SS_ISCONNECTING;
82 so->so_state |= (SS_ISDISCONNECTING|SS_CANTRCVMORE|SS_CANTSENDMORE);
83 wakeup((caddr_t)&so->so_timeo);
84 sowwakeup(so);
85 sorwakeup(so);
86}
87
88soisdisconnected(so)
89 struct socket *so;
90{
91
92 so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING);
93 so->so_state |= (SS_CANTRCVMORE|SS_CANTSENDMORE);
94 wakeup((caddr_t)&so->so_timeo);
95 sowwakeup(so);
96 sorwakeup(so);
97}
98
/*
 * When an attempt at a new connection is noted on a socket
 * which accepts connections, sonewconn is called.  If the
 * connection is possible (subject to space constraints, etc.)
 * then we allocate a new structure, properly linked into the
 * data structure of the original socket, and return this.
 * Returns a null pointer if the queues are already half again
 * past so_qlimit, no mbuf is available, or the protocol's
 * attach request fails.
 */
struct socket *
sonewconn(head)
	register struct socket *head;
{
	register struct socket *so;
	struct mbuf *m;

	/* refuse if q0 + q already holds 3/2 the accept backlog limit */
	if (head->so_qlen + head->so_q0len > 3 * head->so_qlimit / 2)
		goto bad;
	m = m_getclr(M_DONTWAIT);	/* zeroed mbuf holds the new socket */
	if (m == 0)
		goto bad;
	so = mtod(m, struct socket *);
	/* child inherits the listener's attributes... */
	so->so_type = head->so_type;
	so->so_options = head->so_options &~ SO_ACCEPTCONN;	/* ...but does not itself listen */
	so->so_linger = head->so_linger;
	so->so_state = head->so_state;
	so->so_proto = head->so_proto;
	so->so_timeo = head->so_timeo;
	so->so_pgrp = head->so_pgrp;
	soqinsque(head, so, 0);		/* onto the partial-connection queue */
	if ((*so->so_proto->pr_usrreq)(so, PRU_ATTACH, 0, 0, 0)) {
		/* protocol refused the attach: undo the queueing and free */
		(void) soqremque(so, 0);
		m_free(m);
		goto bad;
	}
	return (so);
bad:
	return ((struct socket *)0);
}
136
137soqinsque(head, so, q)
138 register struct socket *head, *so;
139 int q;
140{
141
142 so->so_head = head;
143 if (q == 0) {
144 head->so_q0len++;
145 so->so_q0 = head->so_q0;
146 head->so_q0 = so;
147 } else {
148 head->so_qlen++;
149 so->so_q = head->so_q;
150 head->so_q = so;
151 }
152}
153
/*
 * Remove socket so from the queue selected by q (0: so_q0, else so_q)
 * of its head socket.  Returns 1 on success, 0 if the socket is not
 * found before the scan wraps back to the head (the links appear to
 * run circularly back through the head socket -- confirm against
 * socketvar.h initialization).
 */
soqremque(so, q)
	register struct socket *so;
	int q;
{
	register struct socket *head, *prev, *next;

	head = so->so_head;
	prev = head;
	/* walk the chosen queue looking for the entry that links to so */
	for (;;) {
		next = q ? prev->so_q : prev->so_q0;
		if (next == so)
			break;
		if (next == head)	/* wrapped around: so is not on this queue */
			return (0);
		prev = next;
	}
	/* unlink from the proper queue and maintain its length count */
	if (q == 0) {
		prev->so_q0 = next->so_q0;
		head->so_q0len--;
	} else {
		prev->so_q = next->so_q;
		head->so_qlen--;
	}
	/* fully detach the removed socket */
	next->so_q0 = next->so_q = 0;
	next->so_head = 0;
	return (1);
}
181
182/*
183 * Socantsendmore indicates that no more data will be sent on the
184 * socket; it would normally be applied to a socket when the user
185 * informs the system that no more data is to be sent, by the protocol
186 * code (in case PRU_SHUTDOWN). Socantrcvmore indicates that no more data
187 * will be received, and will normally be applied to the socket by a
188 * protocol when it detects that the peer will send no more data.
189 * Data queued for reading in the socket may yet be read.
190 */
191
192socantsendmore(so)
193 struct socket *so;
194{
195
196 so->so_state |= SS_CANTSENDMORE;
197 sowwakeup(so);
198}
199
200socantrcvmore(so)
201 struct socket *so;
202{
203
204 so->so_state |= SS_CANTRCVMORE;
205 sorwakeup(so);
206}
207
208/*
209 * Socket select/wakeup routines.
210 */
211
212/*
213 * Interface routine to select() system
214 * call for sockets.
215 */
216soselect(so, rw)
217 register struct socket *so;
218 int rw;
219{
220 int s = splnet();
221
222 switch (rw) {
223
224 case FREAD:
225 if (soreadable(so)) {
226 splx(s);
227 return (1);
228 }
229 sbselqueue(&so->so_rcv);
230 break;
231
232 case FWRITE:
233 if (sowriteable(so)) {
234 splx(s);
235 return (1);
236 }
237 sbselqueue(&so->so_snd);
238 break;
239 }
240 splx(s);
241 return (0);
242}
243
244/*
245 * Queue a process for a select on a socket buffer.
246 */
247sbselqueue(sb)
248 struct sockbuf *sb;
249{
250 register struct proc *p;
251
252 if ((p = sb->sb_sel) && p->p_wchan == (caddr_t)&selwait)
253 sb->sb_flags |= SB_COLL;
254 else
255 sb->sb_sel = u.u_procp;
256}
257
/*
 * Wait for data to arrive at/drain from a socket buffer.
 * Marks the buffer so sbwakeup() knows someone is sleeping here,
 * then sleeps on the buffer's character count.
 */
sbwait(sb)
	struct sockbuf *sb;
{

	sb->sb_flags |= SB_WAIT;
	/* PZERO+1: just above PZERO -- presumably signal-interruptible; confirm */
	sleep((caddr_t)&sb->sb_cc, PZERO+1);
}
268
269/*
270 * Wakeup processes waiting on a socket buffer.
271 */
272sbwakeup(sb)
273 struct sockbuf *sb;
274{
275
276 if (sb->sb_sel) {
277 selwakeup(sb->sb_sel, sb->sb_flags & SB_COLL);
278 sb->sb_sel = 0;
279 sb->sb_flags &= ~SB_COLL;
280 }
281 if (sb->sb_flags & SB_WAIT) {
282 sb->sb_flags &= ~SB_WAIT;
283 wakeup((caddr_t)&sb->sb_cc);
284 }
285}
286
287/*
288 * Socket buffer (struct sockbuf) utility routines.
289 *
290 * Each socket contains two socket buffers: one for sending data and
291 * one for receiving data. Each buffer contains a queue of mbufs,
292 * information about the number of mbufs and amount of data in the
293 * queue, and other fields allowing select() statements and notification
294 * on data availability to be implemented.
295 *
296 * Before using a new socket structure it is first necessary to reserve
297 * buffer space to the socket, by calling sbreserve. This commits
298 * some of the available buffer space in the system buffer pool for the
299 * socket. The space should be released by calling sbrelease when the
300 * socket is destroyed.
301 *
302 * The routine sbappend() is normally called to append new mbufs
303 * to a socket buffer, after checking that adequate space is available
304 * comparing the function sbspace() with the amount of data to be added.
305 * Data is normally removed from a socket buffer in a protocol by
306 * first calling m_copy on the socket buffer mbuf chain and sending this
307 * to a peer, and then removing the data from the socket buffer with
308 * sbdrop when the data is acknowledged by the peer (or immediately
309 * in the case of unreliable protocols.)
310 *
311 * Protocols which do not require connections place both source address
312 * and data information in socket buffer queues. The source addresses
313 * are stored in single mbufs after each data item, and are easily found
314 * as the data items are all marked with end of record markers. The
315 * sbappendaddr() routine stores a datum and associated address in
316 * a socket buffer. Note that, unlike sbappend(), this routine checks
317 * for the caller that there will be enough space to store the data.
318 * It fails if there is not enough space, or if it cannot find
319 * a mbuf to store the address in.
320 *
321 * The higher-level routines sosend and soreceive (in socket.c)
322 * also add data to, and remove data from socket buffers respectively.
323 */
324
325/*
326 * Allot mbufs to a sockbuf.
327 */
328sbreserve(sb, cc)
329 struct sockbuf *sb;
330{
331
332 /* someday maybe this routine will fail... */
333 sb->sb_hiwat = cc;
334 sb->sb_mbmax = cc*2;
335 return (1);
336}
337
338/*
339 * Free mbufs held by a socket, and reserved mbuf space.
340 */
341sbrelease(sb)
342 struct sockbuf *sb;
343{
344
345 sbflush(sb);
346 sb->sb_hiwat = sb->sb_mbmax = 0;
347}
348
349/*
350 * Routines to add (at the end) and remove (from the beginning)
351 * data from a mbuf queue.
352 */
353
/*
 * Append mbuf chain m to the end of sockbuf sb.  Empty mbufs that
 * carry no end-of-record mark (m_act == 0) are freed rather than
 * queued.  Where possible, data is compacted into the buffer's
 * trailing mbuf instead of linking a new one: both mbufs must have
 * m_off within MMAXOFF (data stored inside the mbuf itself --
 * confirm against mbuf.h), neither may carry a record mark, and the
 * combined data must fit.
 */
sbappend(sb, m)
	register struct mbuf *m;
	register struct sockbuf *sb;
{
	register struct mbuf *n;

	/* find the last mbuf currently in the buffer, if any */
	n = sb->sb_mb;
	if (n)
		while (n->m_next)
			n = n->m_next;
	while (m) {
		/* drop empty mbufs that don't mark an end of record */
		if (m->m_len == 0 && (int)m->m_act == 0) {
			m = m_free(m);
			continue;
		}
		/* try to copy m's data onto the end of tail mbuf n */
		if (n && n->m_off <= MMAXOFF && m->m_off <= MMAXOFF &&
		    (int)n->m_act == 0 && (int)m->m_act == 0 &&
		    (n->m_off + n->m_len + m->m_len) <= MMAXOFF) {
			bcopy(mtod(m, caddr_t), mtod(n, caddr_t) + n->m_len,
			    (unsigned)m->m_len);
			n->m_len += m->m_len;
			sb->sb_cc += m->m_len;
			m = m_free(m);
			continue;
		}
		/* otherwise link m in as the new tail, accounting via sballoc */
		sballoc(sb, m);
		if (n == 0)
			sb->sb_mb = m;
		else
			n->m_next = m;
		n = m;
		m = m->m_next;
		n->m_next = 0;
	}
}
392
/*
 * Append data and its source address to sockbuf sb, as used by
 * connectionless protocols.  The address asa is copied into a fresh
 * mbuf which is appended first, followed by the data chain m0; both
 * are marked as record ends (m_act = 1), which also prevents
 * sbappend() from compacting them.  Unlike sbappend(), this checks
 * for the caller that the whole record fits.
 * Return 0 if no space in sockbuf or if
 * can't get mbuf to stuff address in; 1 on success.
 */
sbappendaddr(sb, asa, m0)
	struct sockbuf *sb;
	struct sockaddr *asa;
	struct mbuf *m0;
{
	struct sockaddr *msa;
	register struct mbuf *m;
	register int len = sizeof (struct sockaddr);

	m = m0;
	if (m == 0)
		panic("sbappendaddr");
	/*
	 * Total the record length and mark the last data mbuf as an
	 * end of record.  NOTE(review): the mark is set even if the
	 * space check below fails and we return 0.
	 */
	for (;;) {
		len += m->m_len;
		if (m->m_next == 0) {
			m->m_act = (struct mbuf *)1;
			break;
		}
		m = m->m_next;
	}
	if (len > sbspace(sb))
		return (0);
	m = m_get(M_DONTWAIT);
	if (m == 0)
		return (0);
	/* copy the address into its own record-marked mbuf */
	m->m_len = sizeof (struct sockaddr);
	msa = mtod(m, struct sockaddr *);
	*msa = *asa;
	m->m_act = (struct mbuf *)1;
	sbappend(sb, m);
	sbappend(sb, m0);
	return (1);
}
431
432/*
433 * Free all mbufs on a sockbuf mbuf chain.
434 * Check that resource allocations return to 0.
435 */
436sbflush(sb)
437 struct sockbuf *sb;
438{
439
440 if (sb->sb_flags & SB_LOCK)
441 panic("sbflush");
442 if (sb->sb_cc)
443 sbdrop(sb, sb->sb_cc);
444 if (sb->sb_cc || sb->sb_mbcnt || sb->sb_mb)
445 panic("sbflush 2");
446}
447
/*
 * Drop len characters of data from (the front of) sockbuf sb.
 * Mbufs consumed entirely are unlinked and freed; a partially
 * consumed mbuf has its data offset advanced in place.
 * Panics if the buffer holds fewer than len characters.
 */
sbdrop(sb, len)
	register struct sockbuf *sb;
	register int len;
{
	register struct mbuf *m = sb->sb_mb, *mn;

	while (len > 0) {
		if (m == 0)
			panic("sbdrop");	/* asked to drop more than is queued */
		if (m->m_len > len) {
			/* this mbuf survives: trim len bytes off its front */
			m->m_len -= len;
			m->m_off += len;
			sb->sb_cc -= len;
			break;
		}
		/*
		 * Consume this mbuf entirely; sbfree presumably adjusts
		 * sb_cc/sb_mbcnt (macro in socketvar.h -- confirm).
		 */
		len -= m->m_len;
		sbfree(sb, m);
		MFREE(m, mn);
		m = mn;
	}
	sb->sb_mb = m;
}