Commit | Line | Data |
---|---|---|
3a35e7af | 1 | /* uipc_socket2.c 4.28 82/10/09 */ |
681ebb17 BJ |
2 | |
3 | #include "../h/param.h" | |
4 | #include "../h/systm.h" | |
5 | #include "../h/dir.h" | |
6 | #include "../h/user.h" | |
7 | #include "../h/proc.h" | |
8 | #include "../h/file.h" | |
9 | #include "../h/inode.h" | |
10 | #include "../h/buf.h" | |
11 | #include "../h/mbuf.h" | |
681ebb17 BJ |
12 | #include "../h/protosw.h" |
13 | #include "../h/socket.h" | |
14 | #include "../h/socketvar.h" | |
681ebb17 BJ |
15 | |
16 | /* | |
17 | * Primitive routines for operating on sockets and socket buffers | |
18 | */ | |
19 | ||
20 | /* | |
21 | * Procedures to manipulate state flags of socket | |
2deddea9 BJ |
22 | * and do appropriate wakeups. Normal sequence from the |
23 | * active (originating) side is that soisconnecting() is | |
24 | * called during processing of connect() call, | |
4c078bb2 BJ |
25 | * resulting in an eventual call to soisconnected() if/when the |
26 | * connection is established. When the connection is torn down | |
27 | * soisdisconnecting() is called during processing of disconnect() call, | |
28 | * and soisdisconnected() is called when the connection to the peer | |
29 | * is totally severed. The semantics of these routines are such that | |
30 | * connectionless protocols can call soisconnected() and soisdisconnected() | |
31 | * only, bypassing the in-progress calls when setting up a ``connection'' | |
32 | * takes no time. | |
33 | * | |
2deddea9 BJ |
34 | * From the passive side, a socket is created with SO_ACCEPTCONN |
35 | * creating two queues of sockets: so_q0 for connections in progress | |
36 | * and so_q for connections already made and awaiting user acceptance. | |
37 | * As a protocol is preparing incoming connections, it creates a socket | |
38 | * structure queued on so_q0 by calling sonewconn(). When the connection | |
39 | * is established, soisconnected() is called, and transfers the | |
40 | * socket structure to so_q, making it available to accept(). | |
41 | * | |
42 | * If a SO_ACCEPTCONN socket is closed with sockets on either | |
43 | * so_q0 or so_q, these sockets are dropped. | |
44 | * | |
45 | * If and when higher level protocols are implemented in | |
4c078bb2 BJ |
46 | * the kernel, the wakeups done here will sometimes |
47 | * be implemented as software-interrupt process scheduling. | |
681ebb17 | 48 | */ |
4c078bb2 | 49 | |
681ebb17 BJ |
/*
 * Note that a connect is now in progress on so: clear any stale
 * connected/disconnecting bits, set SS_ISCONNECTING, and wake
 * anyone sleeping on the socket's timeout channel (so_timeo).
 */
soisconnecting(so)
	struct socket *so;
{

	so->so_state &= ~(SS_ISCONNECTED|SS_ISDISCONNECTING);
	so->so_state |= SS_ISCONNECTING;
	wakeup((caddr_t)&so->so_timeo);
}
58 | ||
/*
 * Mark so fully connected.  If so is an in-progress connection on an
 * accepting socket (so_head non-null), move it from the head's so_q0
 * (in progress) to so_q (ready for accept) and wake the accept()er
 * sleeping on head->so_timeo.  In all cases set SS_ISCONNECTED and
 * wake timeout sleepers, readers, and writers on so itself.
 */
soisconnected(so)
	struct socket *so;
{
	register struct socket *head = so->so_head;

	if (head) {
		/* Must be on so_q0 if it has a head; anything else is fatal. */
		if (soqremque(so, 0) == 0)
			panic("soisconnected");
		soqinsque(head, so, 1);
		wakeup((caddr_t)&head->so_timeo);
	}
	so->so_state &= ~(SS_ISCONNECTING|SS_ISDISCONNECTING);
	so->so_state |= SS_ISCONNECTED;
	wakeup((caddr_t)&so->so_timeo);
	sorwakeup(so);
	sowwakeup(so);
}
76 | ||
/*
 * Mark a disconnect as being in progress on so.  Further sending
 * and receiving are disallowed (SS_CANTRCVMORE|SS_CANTSENDMORE),
 * and all sleepers — timeout, writers, readers — are awakened so
 * they notice the state change.
 */
soisdisconnecting(so)
	struct socket *so;
{

	so->so_state &= ~SS_ISCONNECTING;
	so->so_state |= (SS_ISDISCONNECTING|SS_CANTRCVMORE|SS_CANTSENDMORE);
	wakeup((caddr_t)&so->so_timeo);
	sowwakeup(so);
	sorwakeup(so);
}
87 | ||
/*
 * Mark so completely disconnected: clear every connection-state bit,
 * shut off both directions of data flow, and wake all sleepers.
 * Connectionless protocols may call this (and soisconnected())
 * directly, bypassing the in-progress states.
 */
soisdisconnected(so)
	struct socket *so;
{

	so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING);
	so->so_state |= (SS_CANTRCVMORE|SS_CANTSENDMORE);
	wakeup((caddr_t)&so->so_timeo);
	sowwakeup(so);
	sorwakeup(so);
}
98 | ||
2deddea9 BJ |
/*
 * When an attempt at a new connection is noted on a socket
 * which accepts connections, sonewconn is called.  If the
 * connection is possible (subject to space constraints, etc.)
 * then we allocate a new structure, properly linked into the
 * data structure of the original socket, and return this.
 * Returns a null socket pointer on failure.
 */
struct socket *
sonewconn(head)
	register struct socket *head;
{
	register struct socket *so;
	struct mbuf *m;

	/* Refuse once the queues are 3/2 over the accept backlog limit. */
	if (head->so_qlen + head->so_q0len > 3 * head->so_qlimit / 2)
		goto bad;
	/* Socket structures live inside mbufs; m_getclr presumably
	   returns a zeroed one.  Don't sleep — may be at interrupt level. */
	m = m_getclr(M_DONTWAIT);
	if (m == 0)
		goto bad;
	so = mtod(m, struct socket *);
	/* Inherit properties from the accepting socket, except that the
	   new socket does not itself accept connections. */
	so->so_type = head->so_type;
	so->so_options = head->so_options &~ SO_ACCEPTCONN;
	so->so_linger = head->so_linger;
	so->so_state = head->so_state;
	so->so_proto = head->so_proto;
	so->so_timeo = head->so_timeo;
	so->so_pgrp = head->so_pgrp;
	/* Queue on so_q0: connection still in progress. */
	soqinsque(head, so, 0);
	/* Give the protocol a chance to attach; back out fully on error. */
	if ((*so->so_proto->pr_usrreq)(so, PRU_ATTACH, 0, 0, 0)) {
		(void) soqremque(so, 0);
		m_free(m);
		goto bad;
	}
	return (so);
bad:
	return ((struct socket *)0);
}
136 | ||
137 | soqinsque(head, so, q) | |
138 | register struct socket *head, *so; | |
139 | int q; | |
140 | { | |
141 | ||
142 | so->so_head = head; | |
143 | if (q == 0) { | |
144 | head->so_q0len++; | |
145 | so->so_q0 = head->so_q0; | |
146 | head->so_q0 = so; | |
147 | } else { | |
148 | head->so_qlen++; | |
149 | so->so_q = head->so_q; | |
150 | head->so_q = so; | |
151 | } | |
152 | } | |
153 | ||
/*
 * Remove socket so from its head's queue (q == 0: so_q0, else so_q).
 * The list is walked from the head; it is apparently circular — the
 * walk stops when it comes back around to head — with the head link
 * set up elsewhere (not visible here), so reaching head again means
 * so was not on the queue.  On success the queue length is dropped
 * and so's links are cleared.
 * Returns 1 if so was found and removed, 0 otherwise.
 */
soqremque(so, q)
	register struct socket *so;
	int q;
{
	register struct socket *head, *prev, *next;

	head = so->so_head;
	prev = head;
	for (;;) {
		next = q ? prev->so_q : prev->so_q0;
		if (next == so)
			break;
		if (next == head)
			return (0);	/* wrapped around: not queued */
		prev = next;
	}
	/* Unlink so (== next) and fix the accounting. */
	if (q == 0) {
		prev->so_q0 = next->so_q0;
		head->so_q0len--;
	} else {
		prev->so_q = next->so_q;
		head->so_qlen--;
	}
	next->so_q0 = next->so_q = 0;
	next->so_head = 0;
	return (1);
}
181 | ||
4c078bb2 BJ |
182 | /* |
183 | * Socantsendmore indicates that no more data will be sent on the | |
184 | * socket; it would normally be applied to a socket when the user | |
185 | * informs the system that no more data is to be sent, by the protocol | |
186 | * code (in case PRU_SHUTDOWN). Socantrcvmore indicates that no more data | |
187 | * will be received, and will normally be applied to the socket by a | |
188 | * protocol when it detects that the peer will send no more data. | |
189 | * Data queued for reading in the socket may yet be read. | |
190 | */ | |
191 | ||
ae921915 BJ |
/*
 * Disallow further sends on so and wake any writer so it can
 * observe the new state.
 */
socantsendmore(so)
	struct socket *so;
{

	so->so_state |= SS_CANTSENDMORE;
	sowwakeup(so);
}
199 | ||
/*
 * Note that no more data will arrive on so; data already queued
 * may still be read.  Wake any reader so it can observe this.
 */
socantrcvmore(so)
	struct socket *so;
{

	so->so_state |= SS_CANTRCVMORE;
	sorwakeup(so);
}
207 | ||
681ebb17 | 208 | /* |
4c078bb2 BJ |
209 | * Socket select/wakeup routines. |
210 | */ | |
211 | ||
/*
 * Interface routine to select() system
 * call for sockets.
 *
 * With network interrupts masked (splnet), report whether so is
 * ready for the requested operation (rw is FREAD or FWRITE).
 * Returns 1 if ready now; otherwise queues the current process for
 * a select wakeup on the appropriate socket buffer and returns 0.
 * Any other rw value simply returns 0.
 */
soselect(so, rw)
	register struct socket *so;
	int rw;
{
	int s = splnet();

	switch (rw) {

	case FREAD:
		if (soreadable(so)) {
			splx(s);
			return (1);
		}
		sbselqueue(&so->so_rcv);
		break;

	case FWRITE:
		if (sowriteable(so)) {
			splx(s);
			return (1);
		}
		sbselqueue(&so->so_snd);
		break;
	}
	splx(s);
	return (0);
}
243 | ||
/*
 * Queue a process for a select on a socket buffer.
 *
 * Only one process can be recorded per buffer (sb_sel); if another
 * process is already recorded and is still waiting in select
 * (its p_wchan is the global selwait channel), note a collision
 * via SB_COLL instead.  Otherwise record the current process.
 */
sbselqueue(sb)
	struct sockbuf *sb;
{
	register struct proc *p;

	if ((p = sb->sb_sel) && p->p_wchan == (caddr_t)&selwait)
		sb->sb_flags |= SB_COLL;
	else
		sb->sb_sel = u.u_procp;
}
257 | ||
ae921915 BJ |
/*
 * Wait for data to arrive at/drain from a socket buffer.
 * Marks the buffer as having a waiter (SB_WAIT) and sleeps on
 * &sb->sb_cc until sbwakeup().  PZERO+1 is presumably a
 * signal-interruptible sleep priority — TODO confirm against param.h.
 */
sbwait(sb)
	struct sockbuf *sb;
{

	sb->sb_flags |= SB_WAIT;
	sleep((caddr_t)&sb->sb_cc, PZERO+1);
}
268 | ||
681ebb17 BJ |
/*
 * Wakeup processes waiting on a socket buffer:
 * first any selecting process recorded by sbselqueue() (passing
 * along whether a select collision occurred), then any process
 * sleeping in sbwait().  Both records are cleared once used.
 */
sbwakeup(sb)
	struct sockbuf *sb;
{

	if (sb->sb_sel) {
		selwakeup(sb->sb_sel, sb->sb_flags & SB_COLL);
		sb->sb_sel = 0;
		sb->sb_flags &= ~SB_COLL;
	}
	if (sb->sb_flags & SB_WAIT) {
		sb->sb_flags &= ~SB_WAIT;
		wakeup((caddr_t)&sb->sb_cc);
	}
}
286 | ||
4c078bb2 BJ |
287 | /* |
288 | * Socket buffer (struct sockbuf) utility routines. | |
289 | * | |
290 | * Each socket contains two socket buffers: one for sending data and | |
291 | * one for receiving data. Each buffer contains a queue of mbufs, | |
292 | * information about the number of mbufs and amount of data in the | |
293 | * queue, and other fields allowing select() statements and notification | |
294 | * on data availability to be implemented. | |
295 | * | |
296 | * Before using a new socket structure it is first necessary to reserve | |
297 | * buffer space to the socket, by calling sbreserve. This commits | |
298 | * some of the available buffer space in the system buffer pool for the | |
299 | * socket. The space should be released by calling sbrelease when the | |
300 | * socket is destroyed. | |
301 | * | |
302 | * The routine sbappend() is normally called to append new mbufs | |
303 | * to a socket buffer, after checking that adequate space is available | |
304 | * comparing the function sbspace() with the amount of data to be added. |
305 | * Data is normally removed from a socket buffer in a protocol by | |
306 | * first calling m_copy on the socket buffer mbuf chain and sending this | |
307 | * to a peer, and then removing the data from the socket buffer with | |
308 | * sbdrop when the data is acknowledged by the peer (or immediately | |
b454c3ea | 309 | * in the case of unreliable protocols.) |
4c078bb2 BJ |
310 | * |
311 | * Protocols which do not require connections place both source address | |
312 | * and data information in socket buffer queues. The source addresses | |
313 | * are stored in single mbufs after each data item, and are easily found | |
314 | * as the data items are all marked with end of record markers. The | |
315 | * sbappendaddr() routine stores a datum and associated address in | |
316 | * a socket buffer. Note that, unlike sbappend(), this routine checks | |
317 | * for the caller that there will be enough space to store the data. | |
318 | * It fails if there is not enough space, or if it cannot find | |
319 | * a mbuf to store the address in. | |
320 | * | |
321 | * The higher-level routines sosend and soreceive (in socket.c) | |
b454c3ea | 322 | * also add data to, and remove data from socket buffers repectively. |
4c078bb2 BJ |
323 | */ |
324 | ||
681ebb17 BJ |
325 | /* |
326 | * Allot mbufs to a sockbuf. | |
327 | */ | |
328 | sbreserve(sb, cc) | |
329 | struct sockbuf *sb; | |
330 | { | |
331 | ||
de48daf3 | 332 | /* someday maybe this routine will fail... */ |
d028a086 | 333 | sb->sb_hiwat = cc; |
76a6e254 | 334 | sb->sb_mbmax = cc*2; |
ae921915 | 335 | return (1); |
681ebb17 BJ |
336 | } |
337 | ||
/*
 * Free mbufs held by a socket, and reserved mbuf space.
 * sbflush() empties (and sanity-checks) the chain; then the
 * space reservation made by sbreserve() is dropped.
 */
sbrelease(sb)
	struct sockbuf *sb;
{

	sbflush(sb);
	sb->sb_hiwat = sb->sb_mbmax = 0;
}
348 | ||
349 | /* | |
350 | * Routines to add (at the end) and remove (from the beginning) | |
351 | * data from a mbuf queue. | |
352 | */ | |
353 | ||
354 | /* | |
355 | * Append mbuf queue m to sockbuf sb. | |
356 | */ | |
357 | sbappend(sb, m) | |
358 | register struct mbuf *m; | |
359 | register struct sockbuf *sb; | |
360 | { | |
e495e1cc | 361 | register struct mbuf *n; |
681ebb17 | 362 | |
e495e1cc BJ |
363 | n = sb->sb_mb; |
364 | if (n) | |
365 | while (n->m_next) | |
366 | n = n->m_next; | |
681ebb17 | 367 | while (m) { |
a73ab5ae | 368 | if (m->m_len == 0 && (int)m->m_act == 0) { |
c64d826c | 369 | m = m_free(m); |
a73ab5ae BJ |
370 | continue; |
371 | } | |
681ebb17 BJ |
372 | if (n && n->m_off <= MMAXOFF && m->m_off <= MMAXOFF && |
373 | (int)n->m_act == 0 && (int)m->m_act == 0 && | |
76a6e254 BJ |
374 | (n->m_off + n->m_len + m->m_len) <= MMAXOFF) { |
375 | bcopy(mtod(m, caddr_t), mtod(n, caddr_t) + n->m_len, | |
ae921915 | 376 | (unsigned)m->m_len); |
681ebb17 BJ |
377 | n->m_len += m->m_len; |
378 | sb->sb_cc += m->m_len; | |
379 | m = m_free(m); | |
380 | continue; | |
381 | } | |
382 | sballoc(sb, m); | |
e495e1cc BJ |
383 | if (n == 0) |
384 | sb->sb_mb = m; | |
385 | else | |
386 | n->m_next = m; | |
681ebb17 | 387 | n = m; |
681ebb17 | 388 | m = m->m_next; |
e495e1cc | 389 | n->m_next = 0; |
681ebb17 BJ |
390 | } |
391 | } | |
392 | ||
4c078bb2 BJ |
/*
 * Append data and address.
 * Return 0 if no space in sockbuf or if
 * can't get mbuf to stuff address in.
 *
 * The address is copied into its own mbuf which is appended ahead of
 * the data chain m0; both that mbuf and the last mbuf of m0 are
 * flagged (m_act != 0) as record boundaries.  Unlike sbappend(),
 * this routine checks for the caller that the whole record fits.
 * NOTE(review): the file-level comment above says addresses are
 * stored "after each data item", but this code appends the address
 * mbuf first — verify against soreceive's expectations.
 */
sbappendaddr(sb, asa, m0)
	struct sockbuf *sb;
	struct sockaddr *asa;
	struct mbuf *m0;
{
	struct sockaddr *msa;
	register struct mbuf *m;
	register int len = sizeof (struct sockaddr);

	m = m0;
	if (m == 0)
		panic("sbappendaddr");
	/* Total the record's length and mark its final data mbuf. */
	for (;;) {
		len += m->m_len;
		if (m->m_next == 0) {
			m->m_act = (struct mbuf *)1;	/* end-of-record */
			break;
		}
		m = m->m_next;
	}
	if (len > sbspace(sb))
		return (0);
	m = m_get(M_DONTWAIT);
	if (m == 0)
		return (0);
	/* Stuff the address into its own record-marked mbuf. */
	m->m_len = sizeof (struct sockaddr);
	msa = mtod(m, struct sockaddr *);
	*msa = *asa;
	m->m_act = (struct mbuf *)1;
	sbappend(sb, m);
	sbappend(sb, m0);
	return (1);
}
431 | ||
681ebb17 BJ |
/*
 * Free all mbufs on a sockbuf mbuf chain.
 * Check that resource allocations return to 0.
 * Panics if the buffer is locked (SB_LOCK) or if the byte count,
 * mbuf count, or chain pointer fail to zero out after the drop.
 */
sbflush(sb)
	struct sockbuf *sb;
{

	if (sb->sb_flags & SB_LOCK)
		panic("sbflush");
	if (sb->sb_cc)
		sbdrop(sb, sb->sb_cc);
	if (sb->sb_cc || sb->sb_mbcnt || sb->sb_mb)
		panic("sbflush 2");
}
447 | ||
/*
 * Drop data from (the front of) a sockbuf chain.
 * Fully-consumed mbufs are unlinked and freed, with sb's accounting
 * adjusted via sbfree(); a partially-consumed mbuf is trimmed in
 * place by advancing m_off.  Panics if len exceeds the data present.
 */
sbdrop(sb, len)
	register struct sockbuf *sb;
	register int len;
{
	register struct mbuf *m = sb->sb_mb, *mn;

	while (len > 0) {
		if (m == 0)
			panic("sbdrop");
		if (m->m_len > len) {
			/* Last affected mbuf: trim from the front and stop. */
			m->m_len -= len;
			m->m_off += len;
			sb->sb_cc -= len;
			break;
		}
		len -= m->m_len;
		sbfree(sb, m);
		MFREE(m, mn);
		m = mn;
	}
	sb->sb_mb = m;
}