Commit | Line | Data |
---|---|---|
b688fc87 WJ |
1 | /* |
2 | * Copyright (c) 1982, 1986, 1988, 1990 Regents of the University of California. | |
3 | * All rights reserved. | |
4 | * | |
5 | * Redistribution and use in source and binary forms, with or without | |
6 | * modification, are permitted provided that the following conditions | |
7 | * are met: | |
8 | * 1. Redistributions of source code must retain the above copyright | |
9 | * notice, this list of conditions and the following disclaimer. | |
10 | * 2. Redistributions in binary form must reproduce the above copyright | |
11 | * notice, this list of conditions and the following disclaimer in the | |
12 | * documentation and/or other materials provided with the distribution. | |
13 | * 3. All advertising materials mentioning features or use of this software | |
14 | * must display the following acknowledgement: | |
15 | * This product includes software developed by the University of | |
16 | * California, Berkeley and its contributors. | |
17 | * 4. Neither the name of the University nor the names of its contributors | |
18 | * may be used to endorse or promote products derived from this software | |
19 | * without specific prior written permission. | |
20 | * | |
21 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND | |
22 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |
23 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | |
24 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | |
25 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | |
26 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | |
27 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | |
28 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | |
29 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | |
30 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | |
31 | * SUCH DAMAGE. | |
32 | * | |
33 | * @(#)tcp_input.c 7.25 (Berkeley) 6/30/90 | |
34 | */ | |
35 | ||
36 | #include "param.h" | |
37 | #include "systm.h" | |
38 | #include "malloc.h" | |
39 | #include "mbuf.h" | |
40 | #include "protosw.h" | |
41 | #include "socket.h" | |
42 | #include "socketvar.h" | |
43 | #include "errno.h" | |
44 | ||
45 | #include "../net/if.h" | |
46 | #include "../net/route.h" | |
47 | ||
48 | #include "in.h" | |
49 | #include "in_systm.h" | |
50 | #include "ip.h" | |
51 | #include "in_pcb.h" | |
52 | #include "ip_var.h" | |
53 | #include "tcp.h" | |
54 | #include "tcp_fsm.h" | |
55 | #include "tcp_seq.h" | |
56 | #include "tcp_timer.h" | |
57 | #include "tcp_var.h" | |
58 | #include "tcpip.h" | |
59 | #include "tcp_debug.h" | |
60 | ||
int	tcprexmtthresh = 3;	/* duplicate ACKs that trigger fast retransmit */
int	tcppredack;	/* XXX debugging: times hdr predict ok for acks */
int	tcppreddat;	/* XXX # times header prediction ok for data packets */
int	tcppcbcachemiss;	/* times the one-behind inpcb cache missed (see findpcb) */
struct	tcpiphdr tcp_saveti;	/* copy of incoming header, saved when SO_DEBUG is set */
struct	inpcb *tcp_last_inpcb = &tcb;	/* one-behind cache of last PCB matched */

struct	tcpcb *tcp_newtcpcb();	/* forward declaration (K&R style) */
69 | ||
/*
 * Insert segment ti into reassembly queue of tcp with
 * control block tp.  Return TH_FIN if reassembly now includes
 * a segment with FIN.  The macro form does the common case inline
 * (segment is the next to be received on an established connection,
 * and the queue is empty), avoiding linkage into and removal
 * from the queue and repetition of various conversions.
 * Set DELACK for segments received in order, but ack immediately
 * when segments are out of order (so fast retransmit can work).
 *
 * Every macro argument is fully parenthesized, and the body is
 * wrapped in do { } while (0) so the expansion is a single
 * statement that is safe inside if/else without braces.
 */
#define	TCP_REASS(tp, ti, m, so, flags) do { \
	if ((ti)->ti_seq == (tp)->rcv_nxt && \
	    (tp)->seg_next == (struct tcpiphdr *)(tp) && \
	    (tp)->t_state == TCPS_ESTABLISHED) { \
		/* In-order segment, empty queue: deliver straight to socket. */ \
		(tp)->t_flags |= TF_DELACK; \
		(tp)->rcv_nxt += (ti)->ti_len; \
		(flags) = (ti)->ti_flags & TH_FIN; \
		tcpstat.tcps_rcvpack++; \
		tcpstat.tcps_rcvbyte += (ti)->ti_len; \
		sbappend(&(so)->so_rcv, (m)); \
		sorwakeup(so); \
	} else { \
		/* Out of order: queue it and force an immediate ACK. */ \
		(flags) = tcp_reass((tp), (ti), (m)); \
		(tp)->t_flags |= TF_ACKNOW; \
	} \
} while (0)
96 | ||
/*
 * tcp_reass -- insert segment ti (data carried in mbuf chain m) into
 * the reassembly queue of the connection described by tp, trimming
 * any overlap against segments already queued, then present to the
 * socket whatever prefix of the sequence space is now contiguous.
 *
 * Returns TH_FIN if the data handed to the user now includes a
 * segment carrying FIN, otherwise 0.  May be called with ti == 0
 * to flush queued pre-ESTABLISHED data once established.
 *
 * The queue is a doubly linked list of tcpiphdrs threaded through
 * ti_next/ti_prev, with the tcpcb itself acting as the list-head
 * sentinel -- hence the casts comparing q against tp.
 */
tcp_reass(tp, ti, m)
	register struct tcpcb *tp;
	register struct tcpiphdr *ti;
	struct mbuf *m;
{
	register struct tcpiphdr *q;
	struct socket *so = tp->t_inpcb->inp_socket;
	int flags;

	/*
	 * Call with ti==0 after become established to
	 * force pre-ESTABLISHED data up to user socket.
	 */
	if (ti == 0)
		goto present;

	/*
	 * Find a segment which begins after this one does.
	 */
	for (q = tp->seg_next; q != (struct tcpiphdr *)tp;
	    q = (struct tcpiphdr *)q->ti_next)
		if (SEQ_GT(q->ti_seq, ti->ti_seq))
			break;

	/*
	 * If there is a preceding segment, it may provide some of
	 * our data already.  If so, drop the data from the incoming
	 * segment.  If it provides all of our data, drop us.
	 */
	if ((struct tcpiphdr *)q->ti_prev != (struct tcpiphdr *)tp) {
		register int i;
		q = (struct tcpiphdr *)q->ti_prev;
		/* conversion to int (in i) handles seq wraparound */
		i = q->ti_seq + q->ti_len - ti->ti_seq;
		if (i > 0) {
			if (i >= ti->ti_len) {
				/* Entirely duplicate: count it and discard. */
				tcpstat.tcps_rcvduppack++;
				tcpstat.tcps_rcvdupbyte += ti->ti_len;
				m_freem(m);
				return (0);
			}
			/* Partial overlap: trim i bytes from our front. */
			m_adj(m, i);
			ti->ti_len -= i;
			ti->ti_seq += i;
		}
		/* Step back to the first segment starting after us. */
		q = (struct tcpiphdr *)(q->ti_next);
	}
	tcpstat.tcps_rcvoopack++;
	tcpstat.tcps_rcvoobyte += ti->ti_len;
	/*
	 * Stash the mbuf chain pointer in the queued header; REASS_MBUF
	 * presumably overlays a spare header field -- see tcp_var.h.
	 */
	REASS_MBUF(ti) = m;		/* XXX */

	/*
	 * While we overlap succeeding segments trim them or,
	 * if they are completely covered, dequeue them.
	 */
	while (q != (struct tcpiphdr *)tp) {
		register int i = (ti->ti_seq + ti->ti_len) - q->ti_seq;
		if (i <= 0)
			break;
		if (i < q->ti_len) {
			/* Partly covered: trim its front and stop. */
			q->ti_seq += i;
			q->ti_len -= i;
			m_adj(REASS_MBUF(q), i);
			break;
		}
		/* Fully covered: advance q first, then unlink and free. */
		q = (struct tcpiphdr *)q->ti_next;
		m = REASS_MBUF((struct tcpiphdr *)q->ti_prev);
		remque(q->ti_prev);
		m_freem(m);
	}

	/*
	 * Stick new segment in its place.
	 */
	insque(ti, q->ti_prev);

present:
	/*
	 * Present data to user, advancing rcv_nxt through
	 * completed sequence space.
	 */
	if (TCPS_HAVERCVDSYN(tp->t_state) == 0)
		return (0);
	ti = tp->seg_next;
	/* Nothing queued, or a hole remains at the window edge. */
	if (ti == (struct tcpiphdr *)tp || ti->ti_seq != tp->rcv_nxt)
		return (0);
	/* Don't deliver data before the handshake completes. */
	if (tp->t_state == TCPS_SYN_RECEIVED && ti->ti_len)
		return (0);
	do {
		tp->rcv_nxt += ti->ti_len;
		flags = ti->ti_flags & TH_FIN;
		remque(ti);
		m = REASS_MBUF(ti);
		ti = (struct tcpiphdr *)ti->ti_next;
		if (so->so_state & SS_CANTRCVMORE)
			m_freem(m);	/* receive side shut down: discard */
		else
			sbappend(&so->so_rcv, m);
	} while (ti != (struct tcpiphdr *)tp && ti->ti_seq == tp->rcv_nxt);
	sorwakeup(so);
	return (flags);
}
199 | ||
200 | /* | |
201 | * TCP input routine, follows pages 65-76 of the | |
202 | * protocol specification dated September, 1981 very closely. | |
203 | */ | |
204 | tcp_input(m, iphlen) | |
205 | register struct mbuf *m; | |
206 | int iphlen; | |
207 | { | |
208 | register struct tcpiphdr *ti; | |
209 | register struct inpcb *inp; | |
210 | struct mbuf *om = 0; | |
211 | int len, tlen, off; | |
212 | register struct tcpcb *tp = 0; | |
213 | register int tiflags; | |
214 | struct socket *so; | |
215 | int todrop, acked, ourfinisacked, needoutput = 0; | |
216 | short ostate; | |
217 | struct in_addr laddr; | |
218 | int dropsocket = 0; | |
219 | int iss = 0; | |
220 | ||
221 | tcpstat.tcps_rcvtotal++; | |
222 | /* | |
223 | * Get IP and TCP header together in first mbuf. | |
224 | * Note: IP leaves IP header in first mbuf. | |
225 | */ | |
226 | ti = mtod(m, struct tcpiphdr *); | |
227 | if (iphlen > sizeof (struct ip)) | |
228 | ip_stripoptions(m, (struct mbuf *)0); | |
229 | if (m->m_len < sizeof (struct tcpiphdr)) { | |
230 | if ((m = m_pullup(m, sizeof (struct tcpiphdr))) == 0) { | |
231 | tcpstat.tcps_rcvshort++; | |
232 | return; | |
233 | } | |
234 | ti = mtod(m, struct tcpiphdr *); | |
235 | } | |
236 | ||
237 | /* | |
238 | * Checksum extended TCP header and data. | |
239 | */ | |
240 | tlen = ((struct ip *)ti)->ip_len; | |
241 | len = sizeof (struct ip) + tlen; | |
242 | ti->ti_next = ti->ti_prev = 0; | |
243 | ti->ti_x1 = 0; | |
244 | ti->ti_len = (u_short)tlen; | |
245 | HTONS(ti->ti_len); | |
246 | if (ti->ti_sum = in_cksum(m, len)) { | |
247 | tcpstat.tcps_rcvbadsum++; | |
248 | goto drop; | |
249 | } | |
250 | ||
251 | /* | |
252 | * Check that TCP offset makes sense, | |
253 | * pull out TCP options and adjust length. XXX | |
254 | */ | |
255 | off = ti->ti_off << 2; | |
256 | if (off < sizeof (struct tcphdr) || off > tlen) { | |
257 | tcpstat.tcps_rcvbadoff++; | |
258 | goto drop; | |
259 | } | |
260 | tlen -= off; | |
261 | ti->ti_len = tlen; | |
262 | if (off > sizeof (struct tcphdr)) { | |
263 | if (m->m_len < sizeof(struct ip) + off) { | |
264 | if ((m = m_pullup(m, sizeof (struct ip) + off)) == 0) { | |
265 | tcpstat.tcps_rcvshort++; | |
266 | return; | |
267 | } | |
268 | ti = mtod(m, struct tcpiphdr *); | |
269 | } | |
270 | om = m_get(M_DONTWAIT, MT_DATA); | |
271 | if (om == 0) | |
272 | goto drop; | |
273 | om->m_len = off - sizeof (struct tcphdr); | |
274 | { caddr_t op = mtod(m, caddr_t) + sizeof (struct tcpiphdr); | |
275 | bcopy(op, mtod(om, caddr_t), (unsigned)om->m_len); | |
276 | m->m_len -= om->m_len; | |
277 | m->m_pkthdr.len -= om->m_len; | |
278 | bcopy(op+om->m_len, op, | |
279 | (unsigned)(m->m_len-sizeof (struct tcpiphdr))); | |
280 | } | |
281 | } | |
282 | tiflags = ti->ti_flags; | |
283 | ||
284 | /* | |
285 | * Convert TCP protocol specific fields to host format. | |
286 | */ | |
287 | NTOHL(ti->ti_seq); | |
288 | NTOHL(ti->ti_ack); | |
289 | NTOHS(ti->ti_win); | |
290 | NTOHS(ti->ti_urp); | |
291 | ||
292 | /* | |
293 | * Locate pcb for segment. | |
294 | */ | |
295 | findpcb: | |
296 | inp = tcp_last_inpcb; | |
297 | if (inp->inp_lport != ti->ti_dport || | |
298 | inp->inp_fport != ti->ti_sport || | |
299 | inp->inp_faddr.s_addr != ti->ti_src.s_addr || | |
300 | inp->inp_laddr.s_addr != ti->ti_dst.s_addr) { | |
301 | inp = in_pcblookup(&tcb, ti->ti_src, ti->ti_sport, | |
302 | ti->ti_dst, ti->ti_dport, INPLOOKUP_WILDCARD); | |
303 | if (inp) | |
304 | tcp_last_inpcb = inp; | |
305 | ++tcppcbcachemiss; | |
306 | } | |
307 | ||
308 | /* | |
309 | * If the state is CLOSED (i.e., TCB does not exist) then | |
310 | * all data in the incoming segment is discarded. | |
311 | * If the TCB exists but is in CLOSED state, it is embryonic, | |
312 | * but should either do a listen or a connect soon. | |
313 | */ | |
314 | if (inp == 0) | |
315 | goto dropwithreset; | |
316 | tp = intotcpcb(inp); | |
317 | if (tp == 0) | |
318 | goto dropwithreset; | |
319 | if (tp->t_state == TCPS_CLOSED) | |
320 | goto drop; | |
321 | so = inp->inp_socket; | |
322 | if (so->so_options & (SO_DEBUG|SO_ACCEPTCONN)) { | |
323 | if (so->so_options & SO_DEBUG) { | |
324 | ostate = tp->t_state; | |
325 | tcp_saveti = *ti; | |
326 | } | |
327 | if (so->so_options & SO_ACCEPTCONN) { | |
328 | so = sonewconn(so, 0); | |
329 | if (so == 0) | |
330 | goto drop; | |
331 | /* | |
332 | * This is ugly, but .... | |
333 | * | |
334 | * Mark socket as temporary until we're | |
335 | * committed to keeping it. The code at | |
336 | * ``drop'' and ``dropwithreset'' check the | |
337 | * flag dropsocket to see if the temporary | |
338 | * socket created here should be discarded. | |
339 | * We mark the socket as discardable until | |
340 | * we're committed to it below in TCPS_LISTEN. | |
341 | */ | |
342 | dropsocket++; | |
343 | inp = (struct inpcb *)so->so_pcb; | |
344 | inp->inp_laddr = ti->ti_dst; | |
345 | inp->inp_lport = ti->ti_dport; | |
346 | #if BSD>=43 | |
347 | inp->inp_options = ip_srcroute(); | |
348 | #endif | |
349 | tp = intotcpcb(inp); | |
350 | tp->t_state = TCPS_LISTEN; | |
351 | } | |
352 | } | |
353 | ||
354 | /* | |
355 | * Segment received on connection. | |
356 | * Reset idle time and keep-alive timer. | |
357 | */ | |
358 | tp->t_idle = 0; | |
359 | tp->t_timer[TCPT_KEEP] = tcp_keepidle; | |
360 | ||
361 | /* | |
362 | * Process options if not in LISTEN state, | |
363 | * else do it below (after getting remote address). | |
364 | */ | |
365 | if (om && tp->t_state != TCPS_LISTEN) { | |
366 | tcp_dooptions(tp, om, ti); | |
367 | om = 0; | |
368 | } | |
369 | /* | |
370 | * Header prediction: check for the two common cases | |
371 | * of a uni-directional data xfer. If the packet has | |
372 | * no control flags, is in-sequence, the window didn't | |
373 | * change and we're not retransmitting, it's a | |
374 | * candidate. If the length is zero and the ack moved | |
375 | * forward, we're the sender side of the xfer. Just | |
376 | * free the data acked & wake any higher level process | |
377 | * that was blocked waiting for space. If the length | |
378 | * is non-zero and the ack didn't move, we're the | |
379 | * receiver side. If we're getting packets in-order | |
380 | * (the reassembly queue is empty), add the data to | |
381 | * the socket buffer and note that we need a delayed ack. | |
382 | */ | |
383 | if (tp->t_state == TCPS_ESTABLISHED && | |
384 | (tiflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK && | |
385 | ti->ti_seq == tp->rcv_nxt && | |
386 | ti->ti_win && ti->ti_win == tp->snd_wnd && | |
387 | tp->snd_nxt == tp->snd_max) { | |
388 | if (ti->ti_len == 0) { | |
389 | if (SEQ_GT(ti->ti_ack, tp->snd_una) && | |
390 | SEQ_LEQ(ti->ti_ack, tp->snd_max) && | |
391 | tp->snd_cwnd >= tp->snd_wnd) { | |
392 | /* | |
393 | * this is a pure ack for outstanding data. | |
394 | */ | |
395 | ++tcppredack; | |
396 | if (tp->t_rtt && SEQ_GT(ti->ti_ack,tp->t_rtseq)) | |
397 | tcp_xmit_timer(tp); | |
398 | acked = ti->ti_ack - tp->snd_una; | |
399 | tcpstat.tcps_rcvackpack++; | |
400 | tcpstat.tcps_rcvackbyte += acked; | |
401 | sbdrop(&so->so_snd, acked); | |
402 | tp->snd_una = ti->ti_ack; | |
403 | m_freem(m); | |
404 | ||
405 | /* | |
406 | * If all outstanding data are acked, stop | |
407 | * retransmit timer, otherwise restart timer | |
408 | * using current (possibly backed-off) value. | |
409 | * If process is waiting for space, | |
410 | * wakeup/selwakeup/signal. If data | |
411 | * are ready to send, let tcp_output | |
412 | * decide between more output or persist. | |
413 | */ | |
414 | if (tp->snd_una == tp->snd_max) | |
415 | tp->t_timer[TCPT_REXMT] = 0; | |
416 | else if (tp->t_timer[TCPT_PERSIST] == 0) | |
417 | tp->t_timer[TCPT_REXMT] = tp->t_rxtcur; | |
418 | ||
419 | if (so->so_snd.sb_flags & SB_NOTIFY) | |
420 | sowwakeup(so); | |
421 | if (so->so_snd.sb_cc) | |
422 | (void) tcp_output(tp); | |
423 | return; | |
424 | } | |
425 | } else if (ti->ti_ack == tp->snd_una && | |
426 | tp->seg_next == (struct tcpiphdr *)tp && | |
427 | ti->ti_len <= sbspace(&so->so_rcv)) { | |
428 | /* | |
429 | * this is a pure, in-sequence data packet | |
430 | * with nothing on the reassembly queue and | |
431 | * we have enough buffer space to take it. | |
432 | */ | |
433 | ++tcppreddat; | |
434 | tp->rcv_nxt += ti->ti_len; | |
435 | tcpstat.tcps_rcvpack++; | |
436 | tcpstat.tcps_rcvbyte += ti->ti_len; | |
437 | /* | |
438 | * Drop TCP and IP headers then add data | |
439 | * to socket buffer | |
440 | */ | |
441 | m->m_data += sizeof(struct tcpiphdr); | |
442 | m->m_len -= sizeof(struct tcpiphdr); | |
443 | sbappend(&so->so_rcv, m); | |
444 | sorwakeup(so); | |
445 | tp->t_flags |= TF_DELACK; | |
446 | return; | |
447 | } | |
448 | } | |
449 | ||
450 | /* | |
451 | * Drop TCP and IP headers; TCP options were dropped above. | |
452 | */ | |
453 | m->m_data += sizeof(struct tcpiphdr); | |
454 | m->m_len -= sizeof(struct tcpiphdr); | |
455 | ||
456 | /* | |
457 | * Calculate amount of space in receive window, | |
458 | * and then do TCP input processing. | |
459 | * Receive window is amount of space in rcv queue, | |
460 | * but not less than advertised window. | |
461 | */ | |
462 | { int win; | |
463 | ||
464 | win = sbspace(&so->so_rcv); | |
465 | if (win < 0) | |
466 | win = 0; | |
467 | tp->rcv_wnd = max(win, (int)(tp->rcv_adv - tp->rcv_nxt)); | |
468 | } | |
469 | ||
470 | switch (tp->t_state) { | |
471 | ||
472 | /* | |
473 | * If the state is LISTEN then ignore segment if it contains an RST. | |
474 | * If the segment contains an ACK then it is bad and send a RST. | |
475 | * If it does not contain a SYN then it is not interesting; drop it. | |
476 | * Don't bother responding if the destination was a broadcast. | |
477 | * Otherwise initialize tp->rcv_nxt, and tp->irs, select an initial | |
478 | * tp->iss, and send a segment: | |
479 | * <SEQ=ISS><ACK=RCV_NXT><CTL=SYN,ACK> | |
480 | * Also initialize tp->snd_nxt to tp->iss+1 and tp->snd_una to tp->iss. | |
481 | * Fill in remote peer address fields if not previously specified. | |
482 | * Enter SYN_RECEIVED state, and process any other fields of this | |
483 | * segment in this state. | |
484 | */ | |
485 | case TCPS_LISTEN: { | |
486 | struct mbuf *am; | |
487 | register struct sockaddr_in *sin; | |
488 | ||
489 | if (tiflags & TH_RST) | |
490 | goto drop; | |
491 | if (tiflags & TH_ACK) | |
492 | goto dropwithreset; | |
493 | if ((tiflags & TH_SYN) == 0) | |
494 | goto drop; | |
495 | if (m->m_flags & M_BCAST) | |
496 | goto drop; | |
497 | am = m_get(M_DONTWAIT, MT_SONAME); /* XXX */ | |
498 | if (am == NULL) | |
499 | goto drop; | |
500 | am->m_len = sizeof (struct sockaddr_in); | |
501 | sin = mtod(am, struct sockaddr_in *); | |
502 | sin->sin_family = AF_INET; | |
503 | sin->sin_len = sizeof(*sin); | |
504 | sin->sin_addr = ti->ti_src; | |
505 | sin->sin_port = ti->ti_sport; | |
506 | laddr = inp->inp_laddr; | |
507 | if (inp->inp_laddr.s_addr == INADDR_ANY) | |
508 | inp->inp_laddr = ti->ti_dst; | |
509 | if (in_pcbconnect(inp, am)) { | |
510 | inp->inp_laddr = laddr; | |
511 | (void) m_free(am); | |
512 | goto drop; | |
513 | } | |
514 | (void) m_free(am); | |
515 | tp->t_template = tcp_template(tp); | |
516 | if (tp->t_template == 0) { | |
517 | tp = tcp_drop(tp, ENOBUFS); | |
518 | dropsocket = 0; /* socket is already gone */ | |
519 | goto drop; | |
520 | } | |
521 | if (om) { | |
522 | tcp_dooptions(tp, om, ti); | |
523 | om = 0; | |
524 | } | |
525 | if (iss) | |
526 | tp->iss = iss; | |
527 | else | |
528 | tp->iss = tcp_iss; | |
529 | tcp_iss += TCP_ISSINCR/2; | |
530 | tp->irs = ti->ti_seq; | |
531 | tcp_sendseqinit(tp); | |
532 | tcp_rcvseqinit(tp); | |
533 | tp->t_flags |= TF_ACKNOW; | |
534 | tp->t_state = TCPS_SYN_RECEIVED; | |
535 | tp->t_timer[TCPT_KEEP] = TCPTV_KEEP_INIT; | |
536 | dropsocket = 0; /* committed to socket */ | |
537 | tcpstat.tcps_accepts++; | |
538 | goto trimthenstep6; | |
539 | } | |
540 | ||
541 | /* | |
542 | * If the state is SYN_SENT: | |
543 | * if seg contains an ACK, but not for our SYN, drop the input. | |
544 | * if seg contains a RST, then drop the connection. | |
545 | * if seg does not contain SYN, then drop it. | |
546 | * Otherwise this is an acceptable SYN segment | |
547 | * initialize tp->rcv_nxt and tp->irs | |
548 | * if seg contains ack then advance tp->snd_una | |
549 | * if SYN has been acked change to ESTABLISHED else SYN_RCVD state | |
550 | * arrange for segment to be acked (eventually) | |
551 | * continue processing rest of data/controls, beginning with URG | |
552 | */ | |
553 | case TCPS_SYN_SENT: | |
554 | if ((tiflags & TH_ACK) && | |
555 | (SEQ_LEQ(ti->ti_ack, tp->iss) || | |
556 | SEQ_GT(ti->ti_ack, tp->snd_max))) | |
557 | goto dropwithreset; | |
558 | if (tiflags & TH_RST) { | |
559 | if (tiflags & TH_ACK) | |
560 | tp = tcp_drop(tp, ECONNREFUSED); | |
561 | goto drop; | |
562 | } | |
563 | if ((tiflags & TH_SYN) == 0) | |
564 | goto drop; | |
565 | if (tiflags & TH_ACK) { | |
566 | tp->snd_una = ti->ti_ack; | |
567 | if (SEQ_LT(tp->snd_nxt, tp->snd_una)) | |
568 | tp->snd_nxt = tp->snd_una; | |
569 | } | |
570 | tp->t_timer[TCPT_REXMT] = 0; | |
571 | tp->irs = ti->ti_seq; | |
572 | tcp_rcvseqinit(tp); | |
573 | tp->t_flags |= TF_ACKNOW; | |
574 | if (tiflags & TH_ACK && SEQ_GT(tp->snd_una, tp->iss)) { | |
575 | tcpstat.tcps_connects++; | |
576 | soisconnected(so); | |
577 | tp->t_state = TCPS_ESTABLISHED; | |
578 | (void) tcp_reass(tp, (struct tcpiphdr *)0, | |
579 | (struct mbuf *)0); | |
580 | /* | |
581 | * if we didn't have to retransmit the SYN, | |
582 | * use its rtt as our initial srtt & rtt var. | |
583 | */ | |
584 | if (tp->t_rtt) | |
585 | tcp_xmit_timer(tp); | |
586 | } else | |
587 | tp->t_state = TCPS_SYN_RECEIVED; | |
588 | ||
589 | trimthenstep6: | |
590 | /* | |
591 | * Advance ti->ti_seq to correspond to first data byte. | |
592 | * If data, trim to stay within window, | |
593 | * dropping FIN if necessary. | |
594 | */ | |
595 | ti->ti_seq++; | |
596 | if (ti->ti_len > tp->rcv_wnd) { | |
597 | todrop = ti->ti_len - tp->rcv_wnd; | |
598 | m_adj(m, -todrop); | |
599 | ti->ti_len = tp->rcv_wnd; | |
600 | tiflags &= ~TH_FIN; | |
601 | tcpstat.tcps_rcvpackafterwin++; | |
602 | tcpstat.tcps_rcvbyteafterwin += todrop; | |
603 | } | |
604 | tp->snd_wl1 = ti->ti_seq - 1; | |
605 | tp->rcv_up = ti->ti_seq; | |
606 | goto step6; | |
607 | } | |
608 | ||
609 | /* | |
610 | * States other than LISTEN or SYN_SENT. | |
611 | * First check that at least some bytes of segment are within | |
612 | * receive window. If segment begins before rcv_nxt, | |
613 | * drop leading data (and SYN); if nothing left, just ack. | |
614 | */ | |
615 | todrop = tp->rcv_nxt - ti->ti_seq; | |
616 | if (todrop > 0) { | |
617 | if (tiflags & TH_SYN) { | |
618 | tiflags &= ~TH_SYN; | |
619 | ti->ti_seq++; | |
620 | if (ti->ti_urp > 1) | |
621 | ti->ti_urp--; | |
622 | else | |
623 | tiflags &= ~TH_URG; | |
624 | todrop--; | |
625 | } | |
626 | if (todrop > ti->ti_len || | |
627 | todrop == ti->ti_len && (tiflags&TH_FIN) == 0) { | |
628 | tcpstat.tcps_rcvduppack++; | |
629 | tcpstat.tcps_rcvdupbyte += ti->ti_len; | |
630 | /* | |
631 | * If segment is just one to the left of the window, | |
632 | * check two special cases: | |
633 | * 1. Don't toss RST in response to 4.2-style keepalive. | |
634 | * 2. If the only thing to drop is a FIN, we can drop | |
635 | * it, but check the ACK or we will get into FIN | |
636 | * wars if our FINs crossed (both CLOSING). | |
637 | * In either case, send ACK to resynchronize, | |
638 | * but keep on processing for RST or ACK. | |
639 | */ | |
640 | if ((tiflags & TH_FIN && todrop == ti->ti_len + 1) | |
641 | #ifdef TCP_COMPAT_42 | |
642 | || (tiflags & TH_RST && ti->ti_seq == tp->rcv_nxt - 1) | |
643 | #endif | |
644 | ) { | |
645 | todrop = ti->ti_len; | |
646 | tiflags &= ~TH_FIN; | |
647 | tp->t_flags |= TF_ACKNOW; | |
648 | } else | |
649 | goto dropafterack; | |
650 | } else { | |
651 | tcpstat.tcps_rcvpartduppack++; | |
652 | tcpstat.tcps_rcvpartdupbyte += todrop; | |
653 | } | |
654 | m_adj(m, todrop); | |
655 | ti->ti_seq += todrop; | |
656 | ti->ti_len -= todrop; | |
657 | if (ti->ti_urp > todrop) | |
658 | ti->ti_urp -= todrop; | |
659 | else { | |
660 | tiflags &= ~TH_URG; | |
661 | ti->ti_urp = 0; | |
662 | } | |
663 | } | |
664 | ||
665 | /* | |
666 | * If new data are received on a connection after the | |
667 | * user processes are gone, then RST the other end. | |
668 | */ | |
669 | if ((so->so_state & SS_NOFDREF) && | |
670 | tp->t_state > TCPS_CLOSE_WAIT && ti->ti_len) { | |
671 | tp = tcp_close(tp); | |
672 | tcpstat.tcps_rcvafterclose++; | |
673 | goto dropwithreset; | |
674 | } | |
675 | ||
676 | /* | |
677 | * If segment ends after window, drop trailing data | |
678 | * (and PUSH and FIN); if nothing left, just ACK. | |
679 | */ | |
680 | todrop = (ti->ti_seq+ti->ti_len) - (tp->rcv_nxt+tp->rcv_wnd); | |
681 | if (todrop > 0) { | |
682 | tcpstat.tcps_rcvpackafterwin++; | |
683 | if (todrop >= ti->ti_len) { | |
684 | tcpstat.tcps_rcvbyteafterwin += ti->ti_len; | |
685 | /* | |
686 | * If a new connection request is received | |
687 | * while in TIME_WAIT, drop the old connection | |
688 | * and start over if the sequence numbers | |
689 | * are above the previous ones. | |
690 | */ | |
691 | if (tiflags & TH_SYN && | |
692 | tp->t_state == TCPS_TIME_WAIT && | |
693 | SEQ_GT(ti->ti_seq, tp->rcv_nxt)) { | |
694 | iss = tp->rcv_nxt + TCP_ISSINCR; | |
695 | tp = tcp_close(tp); | |
696 | goto findpcb; | |
697 | } | |
698 | /* | |
699 | * If window is closed can only take segments at | |
700 | * window edge, and have to drop data and PUSH from | |
701 | * incoming segments. Continue processing, but | |
702 | * remember to ack. Otherwise, drop segment | |
703 | * and ack. | |
704 | */ | |
705 | if (tp->rcv_wnd == 0 && ti->ti_seq == tp->rcv_nxt) { | |
706 | tp->t_flags |= TF_ACKNOW; | |
707 | tcpstat.tcps_rcvwinprobe++; | |
708 | } else | |
709 | goto dropafterack; | |
710 | } else | |
711 | tcpstat.tcps_rcvbyteafterwin += todrop; | |
712 | m_adj(m, -todrop); | |
713 | ti->ti_len -= todrop; | |
714 | tiflags &= ~(TH_PUSH|TH_FIN); | |
715 | } | |
716 | ||
717 | /* | |
718 | * If the RST bit is set examine the state: | |
719 | * SYN_RECEIVED STATE: | |
720 | * If passive open, return to LISTEN state. | |
721 | * If active open, inform user that connection was refused. | |
722 | * ESTABLISHED, FIN_WAIT_1, FIN_WAIT2, CLOSE_WAIT STATES: | |
723 | * Inform user that connection was reset, and close tcb. | |
724 | * CLOSING, LAST_ACK, TIME_WAIT STATES | |
725 | * Close the tcb. | |
726 | */ | |
727 | if (tiflags&TH_RST) switch (tp->t_state) { | |
728 | ||
729 | case TCPS_SYN_RECEIVED: | |
730 | so->so_error = ECONNREFUSED; | |
731 | goto close; | |
732 | ||
733 | case TCPS_ESTABLISHED: | |
734 | case TCPS_FIN_WAIT_1: | |
735 | case TCPS_FIN_WAIT_2: | |
736 | case TCPS_CLOSE_WAIT: | |
737 | so->so_error = ECONNRESET; | |
738 | close: | |
739 | tp->t_state = TCPS_CLOSED; | |
740 | tcpstat.tcps_drops++; | |
741 | tp = tcp_close(tp); | |
742 | goto drop; | |
743 | ||
744 | case TCPS_CLOSING: | |
745 | case TCPS_LAST_ACK: | |
746 | case TCPS_TIME_WAIT: | |
747 | tp = tcp_close(tp); | |
748 | goto drop; | |
749 | } | |
750 | ||
751 | /* | |
752 | * If a SYN is in the window, then this is an | |
753 | * error and we send an RST and drop the connection. | |
754 | */ | |
755 | if (tiflags & TH_SYN) { | |
756 | tp = tcp_drop(tp, ECONNRESET); | |
757 | goto dropwithreset; | |
758 | } | |
759 | ||
760 | /* | |
761 | * If the ACK bit is off we drop the segment and return. | |
762 | */ | |
763 | if ((tiflags & TH_ACK) == 0) | |
764 | goto drop; | |
765 | ||
766 | /* | |
767 | * Ack processing. | |
768 | */ | |
769 | switch (tp->t_state) { | |
770 | ||
771 | /* | |
772 | * In SYN_RECEIVED state if the ack ACKs our SYN then enter | |
773 | * ESTABLISHED state and continue processing, otherwise | |
774 | * send an RST. | |
775 | */ | |
776 | case TCPS_SYN_RECEIVED: | |
777 | if (SEQ_GT(tp->snd_una, ti->ti_ack) || | |
778 | SEQ_GT(ti->ti_ack, tp->snd_max)) | |
779 | goto dropwithreset; | |
780 | tcpstat.tcps_connects++; | |
781 | soisconnected(so); | |
782 | tp->t_state = TCPS_ESTABLISHED; | |
783 | (void) tcp_reass(tp, (struct tcpiphdr *)0, (struct mbuf *)0); | |
784 | tp->snd_wl1 = ti->ti_seq - 1; | |
785 | /* fall into ... */ | |
786 | ||
787 | /* | |
788 | * In ESTABLISHED state: drop duplicate ACKs; ACK out of range | |
789 | * ACKs. If the ack is in the range | |
790 | * tp->snd_una < ti->ti_ack <= tp->snd_max | |
791 | * then advance tp->snd_una to ti->ti_ack and drop | |
792 | * data from the retransmission queue. If this ACK reflects | |
793 | * more up to date window information we update our window information. | |
794 | */ | |
795 | case TCPS_ESTABLISHED: | |
796 | case TCPS_FIN_WAIT_1: | |
797 | case TCPS_FIN_WAIT_2: | |
798 | case TCPS_CLOSE_WAIT: | |
799 | case TCPS_CLOSING: | |
800 | case TCPS_LAST_ACK: | |
801 | case TCPS_TIME_WAIT: | |
802 | ||
803 | if (SEQ_LEQ(ti->ti_ack, tp->snd_una)) { | |
804 | if (ti->ti_len == 0 && ti->ti_win == tp->snd_wnd) { | |
805 | tcpstat.tcps_rcvdupack++; | |
806 | /* | |
807 | * If we have outstanding data (other than | |
808 | * a window probe), this is a completely | |
809 | * duplicate ack (ie, window info didn't | |
810 | * change), the ack is the biggest we've | |
811 | * seen and we've seen exactly our rexmt | |
812 | * threshhold of them, assume a packet | |
813 | * has been dropped and retransmit it. | |
814 | * Kludge snd_nxt & the congestion | |
815 | * window so we send only this one | |
816 | * packet. | |
817 | * | |
818 | * We know we're losing at the current | |
819 | * window size so do congestion avoidance | |
820 | * (set ssthresh to half the current window | |
821 | * and pull our congestion window back to | |
822 | * the new ssthresh). | |
823 | * | |
824 | * Dup acks mean that packets have left the | |
825 | * network (they're now cached at the receiver) | |
826 | * so bump cwnd by the amount in the receiver | |
827 | * to keep a constant cwnd packets in the | |
828 | * network. | |
829 | */ | |
830 | if (tp->t_timer[TCPT_REXMT] == 0 || | |
831 | ti->ti_ack != tp->snd_una) | |
832 | tp->t_dupacks = 0; | |
833 | else if (++tp->t_dupacks == tcprexmtthresh) { | |
834 | tcp_seq onxt = tp->snd_nxt; | |
835 | u_int win = | |
836 | min(tp->snd_wnd, tp->snd_cwnd) / 2 / | |
837 | tp->t_maxseg; | |
838 | ||
839 | if (win < 2) | |
840 | win = 2; | |
841 | tp->snd_ssthresh = win * tp->t_maxseg; | |
842 | tp->t_timer[TCPT_REXMT] = 0; | |
843 | tp->t_rtt = 0; | |
844 | tp->snd_nxt = ti->ti_ack; | |
845 | tp->snd_cwnd = tp->t_maxseg; | |
846 | (void) tcp_output(tp); | |
847 | tp->snd_cwnd = tp->snd_ssthresh + | |
848 | tp->t_maxseg * tp->t_dupacks; | |
849 | if (SEQ_GT(onxt, tp->snd_nxt)) | |
850 | tp->snd_nxt = onxt; | |
851 | goto drop; | |
852 | } else if (tp->t_dupacks > tcprexmtthresh) { | |
853 | tp->snd_cwnd += tp->t_maxseg; | |
854 | (void) tcp_output(tp); | |
855 | goto drop; | |
856 | } | |
857 | } else | |
858 | tp->t_dupacks = 0; | |
859 | break; | |
860 | } | |
861 | /* | |
862 | * If the congestion window was inflated to account | |
863 | * for the other side's cached packets, retract it. | |
864 | */ | |
865 | if (tp->t_dupacks > tcprexmtthresh && | |
866 | tp->snd_cwnd > tp->snd_ssthresh) | |
867 | tp->snd_cwnd = tp->snd_ssthresh; | |
868 | tp->t_dupacks = 0; | |
869 | if (SEQ_GT(ti->ti_ack, tp->snd_max)) { | |
870 | tcpstat.tcps_rcvacktoomuch++; | |
871 | goto dropafterack; | |
872 | } | |
873 | acked = ti->ti_ack - tp->snd_una; | |
874 | tcpstat.tcps_rcvackpack++; | |
875 | tcpstat.tcps_rcvackbyte += acked; | |
876 | ||
877 | /* | |
878 | * If transmit timer is running and timed sequence | |
879 | * number was acked, update smoothed round trip time. | |
880 | * Since we now have an rtt measurement, cancel the | |
881 | * timer backoff (cf., Phil Karn's retransmit alg.). | |
882 | * Recompute the initial retransmit timer. | |
883 | */ | |
884 | if (tp->t_rtt && SEQ_GT(ti->ti_ack, tp->t_rtseq)) | |
885 | tcp_xmit_timer(tp); | |
886 | ||
887 | /* | |
888 | * If all outstanding data is acked, stop retransmit | |
889 | * timer and remember to restart (more output or persist). | |
890 | * If there is more data to be acked, restart retransmit | |
891 | * timer, using current (possibly backed-off) value. | |
892 | */ | |
893 | if (ti->ti_ack == tp->snd_max) { | |
894 | tp->t_timer[TCPT_REXMT] = 0; | |
895 | needoutput = 1; | |
896 | } else if (tp->t_timer[TCPT_PERSIST] == 0) | |
897 | tp->t_timer[TCPT_REXMT] = tp->t_rxtcur; | |
898 | /* | |
899 | * When new data is acked, open the congestion window. | |
900 | * If the window gives us less than ssthresh packets | |
901 | * in flight, open exponentially (maxseg per packet). | |
902 | * Otherwise open linearly: maxseg per window | |
903 | * (maxseg^2 / cwnd per packet), plus a constant | |
904 | * fraction of a packet (maxseg/8) to help larger windows | |
905 | * open quickly enough. | |
906 | */ | |
907 | { | |
908 | register u_int cw = tp->snd_cwnd; | |
909 | register u_int incr = tp->t_maxseg; | |
910 | ||
911 | if (cw > tp->snd_ssthresh) | |
912 | incr = incr * incr / cw + incr / 8; | |
913 | tp->snd_cwnd = min(cw + incr, TCP_MAXWIN); | |
914 | } | |
915 | if (acked > so->so_snd.sb_cc) { | |
916 | tp->snd_wnd -= so->so_snd.sb_cc; | |
917 | sbdrop(&so->so_snd, (int)so->so_snd.sb_cc); | |
918 | ourfinisacked = 1; | |
919 | } else { | |
920 | sbdrop(&so->so_snd, acked); | |
921 | tp->snd_wnd -= acked; | |
922 | ourfinisacked = 0; | |
923 | } | |
924 | if (so->so_snd.sb_flags & SB_NOTIFY) | |
925 | sowwakeup(so); | |
926 | tp->snd_una = ti->ti_ack; | |
927 | if (SEQ_LT(tp->snd_nxt, tp->snd_una)) | |
928 | tp->snd_nxt = tp->snd_una; | |
929 | ||
930 | switch (tp->t_state) { | |
931 | ||
932 | /* | |
933 | * In FIN_WAIT_1 STATE in addition to the processing | |
934 | * for the ESTABLISHED state if our FIN is now acknowledged | |
935 | * then enter FIN_WAIT_2. | |
936 | */ | |
937 | case TCPS_FIN_WAIT_1: | |
938 | if (ourfinisacked) { | |
939 | /* | |
940 | * If we can't receive any more | |
941 | * data, then closing user can proceed. | |
942 | * Starting the timer is contrary to the | |
943 | * specification, but if we don't get a FIN | |
944 | * we'll hang forever. | |
945 | */ | |
946 | if (so->so_state & SS_CANTRCVMORE) { | |
947 | soisdisconnected(so); | |
948 | tp->t_timer[TCPT_2MSL] = tcp_maxidle; | |
949 | } | |
950 | tp->t_state = TCPS_FIN_WAIT_2; | |
951 | } | |
952 | break; | |
953 | ||
954 | /* | |
955 | * In CLOSING STATE in addition to the processing for | |
956 | * the ESTABLISHED state if the ACK acknowledges our FIN | |
957 | * then enter the TIME-WAIT state, otherwise ignore | |
958 | * the segment. | |
959 | */ | |
960 | case TCPS_CLOSING: | |
961 | if (ourfinisacked) { | |
962 | tp->t_state = TCPS_TIME_WAIT; | |
963 | tcp_canceltimers(tp); | |
964 | tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL; | |
965 | soisdisconnected(so); | |
966 | } | |
967 | break; | |
968 | ||
969 | /* | |
970 | * In LAST_ACK, we may still be waiting for data to drain | |
971 | * and/or to be acked, as well as for the ack of our FIN. | |
972 | * If our FIN is now acknowledged, delete the TCB, | |
973 | * enter the closed state and return. | |
974 | */ | |
975 | case TCPS_LAST_ACK: | |
976 | if (ourfinisacked) { | |
977 | tp = tcp_close(tp); | |
978 | goto drop; | |
979 | } | |
980 | break; | |
981 | ||
982 | /* | |
983 | * In TIME_WAIT state the only thing that should arrive | |
984 | * is a retransmission of the remote FIN. Acknowledge | |
985 | * it and restart the finack timer. | |
986 | */ | |
987 | case TCPS_TIME_WAIT: | |
988 | tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL; | |
989 | goto dropafterack; | |
990 | } | |
991 | } | |
992 | ||
993 | step6: | |
994 | /* | |
995 | * Update window information. | |
996 | * Don't look at window if no ACK: TAC's send garbage on first SYN. | |
997 | */ | |
998 | if ((tiflags & TH_ACK) && | |
999 | (SEQ_LT(tp->snd_wl1, ti->ti_seq) || tp->snd_wl1 == ti->ti_seq && | |
1000 | (SEQ_LT(tp->snd_wl2, ti->ti_ack) || | |
1001 | tp->snd_wl2 == ti->ti_ack && ti->ti_win > tp->snd_wnd))) { | |
1002 | /* keep track of pure window updates */ | |
1003 | if (ti->ti_len == 0 && | |
1004 | tp->snd_wl2 == ti->ti_ack && ti->ti_win > tp->snd_wnd) | |
1005 | tcpstat.tcps_rcvwinupd++; | |
1006 | tp->snd_wnd = ti->ti_win; | |
1007 | tp->snd_wl1 = ti->ti_seq; | |
1008 | tp->snd_wl2 = ti->ti_ack; | |
1009 | if (tp->snd_wnd > tp->max_sndwnd) | |
1010 | tp->max_sndwnd = tp->snd_wnd; | |
1011 | needoutput = 1; | |
1012 | } | |
1013 | ||
1014 | /* | |
1015 | * Process segments with URG. | |
1016 | */ | |
1017 | if ((tiflags & TH_URG) && ti->ti_urp && | |
1018 | TCPS_HAVERCVDFIN(tp->t_state) == 0) { | |
1019 | /* | |
1020 | * This is a kludge, but if we receive and accept | |
1021 | * random urgent pointers, we'll crash in | |
1022 | * soreceive. It's hard to imagine someone | |
1023 | * actually wanting to send this much urgent data. | |
1024 | */ | |
1025 | if (ti->ti_urp + so->so_rcv.sb_cc > SB_MAX) { | |
1026 | ti->ti_urp = 0; /* XXX */ | |
1027 | tiflags &= ~TH_URG; /* XXX */ | |
1028 | goto dodata; /* XXX */ | |
1029 | } | |
1030 | /* | |
1031 | * If this segment advances the known urgent pointer, | |
1032 | * then mark the data stream. This should not happen | |
1033 | * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since | |
1034 | * a FIN has been received from the remote side. | |
1035 | * In these states we ignore the URG. | |
1036 | * | |
1037 | * According to RFC961 (Assigned Protocols), | |
1038 | * the urgent pointer points to the last octet | |
1039 | * of urgent data. We continue, however, | |
1040 | * to consider it to indicate the first octet | |
1041 | * of data past the urgent section as the original | |
1042 | * spec states (in one of two places). | |
1043 | */ | |
1044 | if (SEQ_GT(ti->ti_seq+ti->ti_urp, tp->rcv_up)) { | |
1045 | tp->rcv_up = ti->ti_seq + ti->ti_urp; | |
1046 | so->so_oobmark = so->so_rcv.sb_cc + | |
1047 | (tp->rcv_up - tp->rcv_nxt) - 1; | |
1048 | if (so->so_oobmark == 0) | |
1049 | so->so_state |= SS_RCVATMARK; | |
1050 | sohasoutofband(so); | |
1051 | tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA); | |
1052 | } | |
1053 | /* | |
1054 | * Remove out of band data so doesn't get presented to user. | |
1055 | * This can happen independent of advancing the URG pointer, | |
1056 | * but if two URG's are pending at once, some out-of-band | |
1057 | * data may creep in... ick. | |
1058 | */ | |
1059 | if (ti->ti_urp <= ti->ti_len | |
1060 | #ifdef SO_OOBINLINE | |
1061 | && (so->so_options & SO_OOBINLINE) == 0 | |
1062 | #endif | |
1063 | ) | |
1064 | tcp_pulloutofband(so, ti, m); | |
1065 | } else | |
1066 | /* | |
1067 | * If no out of band data is expected, | |
1068 | * pull receive urgent pointer along | |
1069 | * with the receive window. | |
1070 | */ | |
1071 | if (SEQ_GT(tp->rcv_nxt, tp->rcv_up)) | |
1072 | tp->rcv_up = tp->rcv_nxt; | |
1073 | dodata: /* XXX */ | |
1074 | ||
1075 | /* | |
1076 | * Process the segment text, merging it into the TCP sequencing queue, | |
1077 | * and arranging for acknowledgment of receipt if necessary. | |
1078 | * This process logically involves adjusting tp->rcv_wnd as data | |
1079 | * is presented to the user (this happens in tcp_usrreq.c, | |
1080 | * case PRU_RCVD). If a FIN has already been received on this | |
1081 | * connection then we just ignore the text. | |
1082 | */ | |
1083 | if ((ti->ti_len || (tiflags&TH_FIN)) && | |
1084 | TCPS_HAVERCVDFIN(tp->t_state) == 0) { | |
1085 | TCP_REASS(tp, ti, m, so, tiflags); | |
1086 | /* | |
1087 | * Note the amount of data that peer has sent into | |
1088 | * our window, in order to estimate the sender's | |
1089 | * buffer size. | |
1090 | */ | |
1091 | len = so->so_rcv.sb_hiwat - (tp->rcv_adv - tp->rcv_nxt); | |
1092 | } else { | |
1093 | m_freem(m); | |
1094 | tiflags &= ~TH_FIN; | |
1095 | } | |
1096 | ||
1097 | /* | |
1098 | * If FIN is received ACK the FIN and let the user know | |
1099 | * that the connection is closing. | |
1100 | */ | |
1101 | if (tiflags & TH_FIN) { | |
1102 | if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { | |
1103 | socantrcvmore(so); | |
1104 | tp->t_flags |= TF_ACKNOW; | |
1105 | tp->rcv_nxt++; | |
1106 | } | |
1107 | switch (tp->t_state) { | |
1108 | ||
1109 | /* | |
1110 | * In SYN_RECEIVED and ESTABLISHED STATES | |
1111 | * enter the CLOSE_WAIT state. | |
1112 | */ | |
1113 | case TCPS_SYN_RECEIVED: | |
1114 | case TCPS_ESTABLISHED: | |
1115 | tp->t_state = TCPS_CLOSE_WAIT; | |
1116 | break; | |
1117 | ||
1118 | /* | |
1119 | * If still in FIN_WAIT_1 STATE FIN has not been acked so | |
1120 | * enter the CLOSING state. | |
1121 | */ | |
1122 | case TCPS_FIN_WAIT_1: | |
1123 | tp->t_state = TCPS_CLOSING; | |
1124 | break; | |
1125 | ||
1126 | /* | |
1127 | * In FIN_WAIT_2 state enter the TIME_WAIT state, | |
1128 | * starting the time-wait timer, turning off the other | |
1129 | * standard timers. | |
1130 | */ | |
1131 | case TCPS_FIN_WAIT_2: | |
1132 | tp->t_state = TCPS_TIME_WAIT; | |
1133 | tcp_canceltimers(tp); | |
1134 | tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL; | |
1135 | soisdisconnected(so); | |
1136 | break; | |
1137 | ||
1138 | /* | |
1139 | * In TIME_WAIT state restart the 2 MSL time_wait timer. | |
1140 | */ | |
1141 | case TCPS_TIME_WAIT: | |
1142 | tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL; | |
1143 | break; | |
1144 | } | |
1145 | } | |
1146 | if (so->so_options & SO_DEBUG) | |
1147 | tcp_trace(TA_INPUT, ostate, tp, &tcp_saveti, 0); | |
1148 | ||
1149 | /* | |
1150 | * Return any desired output. | |
1151 | */ | |
1152 | if (needoutput || (tp->t_flags & TF_ACKNOW)) | |
1153 | (void) tcp_output(tp); | |
1154 | return; | |
1155 | ||
1156 | dropafterack: | |
1157 | /* | |
1158 | * Generate an ACK dropping incoming segment if it occupies | |
1159 | * sequence space, where the ACK reflects our state. | |
1160 | */ | |
1161 | if (tiflags & TH_RST) | |
1162 | goto drop; | |
1163 | m_freem(m); | |
1164 | tp->t_flags |= TF_ACKNOW; | |
1165 | (void) tcp_output(tp); | |
1166 | return; | |
1167 | ||
1168 | dropwithreset: | |
1169 | if (om) { | |
1170 | (void) m_free(om); | |
1171 | om = 0; | |
1172 | } | |
1173 | /* | |
1174 | * Generate a RST, dropping incoming segment. | |
1175 | * Make ACK acceptable to originator of segment. | |
1176 | * Don't bother to respond if destination was broadcast. | |
1177 | */ | |
1178 | if ((tiflags & TH_RST) || m->m_flags & M_BCAST) | |
1179 | goto drop; | |
1180 | if (tiflags & TH_ACK) | |
1181 | tcp_respond(tp, ti, m, (tcp_seq)0, ti->ti_ack, TH_RST); | |
1182 | else { | |
1183 | if (tiflags & TH_SYN) | |
1184 | ti->ti_len++; | |
1185 | tcp_respond(tp, ti, m, ti->ti_seq+ti->ti_len, (tcp_seq)0, | |
1186 | TH_RST|TH_ACK); | |
1187 | } | |
1188 | /* destroy temporarily created socket */ | |
1189 | if (dropsocket) | |
1190 | (void) soabort(so); | |
1191 | return; | |
1192 | ||
1193 | drop: | |
1194 | if (om) | |
1195 | (void) m_free(om); | |
1196 | /* | |
1197 | * Drop space held by incoming segment and return. | |
1198 | */ | |
1199 | if (tp && (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)) | |
1200 | tcp_trace(TA_DROP, ostate, tp, &tcp_saveti, 0); | |
1201 | m_freem(m); | |
1202 | /* destroy temporarily created socket */ | |
1203 | if (dropsocket) | |
1204 | (void) soabort(so); | |
1205 | return; | |
1206 | } | |
1207 | ||
1208 | tcp_dooptions(tp, om, ti) | |
1209 | struct tcpcb *tp; | |
1210 | struct mbuf *om; | |
1211 | struct tcpiphdr *ti; | |
1212 | { | |
1213 | register u_char *cp; | |
1214 | u_short mss; | |
1215 | int opt, optlen, cnt; | |
1216 | ||
1217 | cp = mtod(om, u_char *); | |
1218 | cnt = om->m_len; | |
1219 | for (; cnt > 0; cnt -= optlen, cp += optlen) { | |
1220 | opt = cp[0]; | |
1221 | if (opt == TCPOPT_EOL) | |
1222 | break; | |
1223 | if (opt == TCPOPT_NOP) | |
1224 | optlen = 1; | |
1225 | else { | |
1226 | optlen = cp[1]; | |
1227 | if (optlen <= 0) | |
1228 | break; | |
1229 | } | |
1230 | switch (opt) { | |
1231 | ||
1232 | default: | |
1233 | continue; | |
1234 | ||
1235 | case TCPOPT_MAXSEG: | |
1236 | if (optlen != 4) | |
1237 | continue; | |
1238 | if (!(ti->ti_flags & TH_SYN)) | |
1239 | continue; | |
1240 | bcopy((char *) cp + 2, (char *) &mss, sizeof(mss)); | |
1241 | NTOHS(mss); | |
1242 | (void) tcp_mss(tp, mss); /* sets t_maxseg */ | |
1243 | break; | |
1244 | } | |
1245 | } | |
1246 | (void) m_free(om); | |
1247 | } | |
1248 | ||
1249 | /* | |
1250 | * Pull out of band byte out of a segment so | |
1251 | * it doesn't appear in the user's data queue. | |
1252 | * It is still reflected in the segment length for | |
1253 | * sequencing purposes. | |
1254 | */ | |
1255 | tcp_pulloutofband(so, ti, m) | |
1256 | struct socket *so; | |
1257 | struct tcpiphdr *ti; | |
1258 | register struct mbuf *m; | |
1259 | { | |
1260 | int cnt = ti->ti_urp - 1; | |
1261 | ||
1262 | while (cnt >= 0) { | |
1263 | if (m->m_len > cnt) { | |
1264 | char *cp = mtod(m, caddr_t) + cnt; | |
1265 | struct tcpcb *tp = sototcpcb(so); | |
1266 | ||
1267 | tp->t_iobc = *cp; | |
1268 | tp->t_oobflags |= TCPOOB_HAVEDATA; | |
1269 | bcopy(cp+1, cp, (unsigned)(m->m_len - cnt - 1)); | |
1270 | m->m_len--; | |
1271 | return; | |
1272 | } | |
1273 | cnt -= m->m_len; | |
1274 | m = m->m_next; | |
1275 | if (m == 0) | |
1276 | break; | |
1277 | } | |
1278 | panic("tcp_pulloutofband"); | |
1279 | } | |
1280 | ||
1281 | /* | |
1282 | * Collect new round-trip time estimate | |
1283 | * and update averages and current timeout. | |
1284 | */ | |
1285 | tcp_xmit_timer(tp) | |
1286 | register struct tcpcb *tp; | |
1287 | { | |
1288 | register short delta; | |
1289 | ||
1290 | tcpstat.tcps_rttupdated++; | |
1291 | if (tp->t_srtt != 0) { | |
1292 | /* | |
1293 | * srtt is stored as fixed point with 3 bits after the | |
1294 | * binary point (i.e., scaled by 8). The following magic | |
1295 | * is equivalent to the smoothing algorithm in rfc793 with | |
1296 | * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed | |
1297 | * point). Adjust t_rtt to origin 0. | |
1298 | */ | |
1299 | delta = tp->t_rtt - 1 - (tp->t_srtt >> TCP_RTT_SHIFT); | |
1300 | if ((tp->t_srtt += delta) <= 0) | |
1301 | tp->t_srtt = 1; | |
1302 | /* | |
1303 | * We accumulate a smoothed rtt variance (actually, a | |
1304 | * smoothed mean difference), then set the retransmit | |
1305 | * timer to smoothed rtt + 4 times the smoothed variance. | |
1306 | * rttvar is stored as fixed point with 2 bits after the | |
1307 | * binary point (scaled by 4). The following is | |
1308 | * equivalent to rfc793 smoothing with an alpha of .75 | |
1309 | * (rttvar = rttvar*3/4 + |delta| / 4). This replaces | |
1310 | * rfc793's wired-in beta. | |
1311 | */ | |
1312 | if (delta < 0) | |
1313 | delta = -delta; | |
1314 | delta -= (tp->t_rttvar >> TCP_RTTVAR_SHIFT); | |
1315 | if ((tp->t_rttvar += delta) <= 0) | |
1316 | tp->t_rttvar = 1; | |
1317 | } else { | |
1318 | /* | |
1319 | * No rtt measurement yet - use the unsmoothed rtt. | |
1320 | * Set the variance to half the rtt (so our first | |
1321 | * retransmit happens at 2*rtt) | |
1322 | */ | |
1323 | tp->t_srtt = tp->t_rtt << TCP_RTT_SHIFT; | |
1324 | tp->t_rttvar = tp->t_rtt << (TCP_RTTVAR_SHIFT - 1); | |
1325 | } | |
1326 | tp->t_rtt = 0; | |
1327 | tp->t_rxtshift = 0; | |
1328 | ||
1329 | /* | |
1330 | * the retransmit should happen at rtt + 4 * rttvar. | |
1331 | * Because of the way we do the smoothing, srtt and rttvar | |
1332 | * will each average +1/2 tick of bias. When we compute | |
1333 | * the retransmit timer, we want 1/2 tick of rounding and | |
1334 | * 1 extra tick because of +-1/2 tick uncertainty in the | |
1335 | * firing of the timer. The bias will give us exactly the | |
1336 | * 1.5 tick we need. But, because the bias is | |
1337 | * statistical, we have to test that we don't drop below | |
1338 | * the minimum feasible timer (which is 2 ticks). | |
1339 | */ | |
1340 | TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp), | |
1341 | tp->t_rttmin, TCPTV_REXMTMAX); | |
1342 | ||
1343 | /* | |
1344 | * We received an ack for a packet that wasn't retransmitted; | |
1345 | * it is probably safe to discard any error indications we've | |
1346 | * received recently. This isn't quite right, but close enough | |
1347 | * for now (a route might have failed after we sent a segment, | |
1348 | * and the return path might not be symmetrical). | |
1349 | */ | |
1350 | tp->t_softerror = 0; | |
1351 | } | |
1352 | ||
1353 | /* | |
1354 | * Determine a reasonable value for maxseg size. | |
1355 | * If the route is known, check route for mtu. | |
1356 | * If none, use an mss that can be handled on the outgoing | |
1357 | * interface without forcing IP to fragment; if bigger than | |
1358 | * an mbuf cluster (MCLBYTES), round down to nearest multiple of MCLBYTES | |
1359 | * to utilize large mbufs. If no route is found, route has no mtu, | |
1360 | * or the destination isn't local, use a default, hopefully conservative | |
1361 | * size (usually 512 or the default IP max size, but no more than the mtu | |
1362 | * of the interface), as we can't discover anything about intervening | |
1363 | * gateways or networks. We also initialize the congestion/slow start | |
1364 | * window to be a single segment if the destination isn't local. | |
1365 | * While looking at the routing entry, we also initialize other path-dependent | |
1366 | * parameters from pre-set or cached values in the routing entry. | |
1367 | */ | |
1368 | ||
1369 | tcp_mss(tp, offer) | |
1370 | register struct tcpcb *tp; | |
1371 | u_short offer; | |
1372 | { | |
1373 | struct route *ro; | |
1374 | register struct rtentry *rt; | |
1375 | struct ifnet *ifp; | |
1376 | register int rtt, mss; | |
1377 | u_long bufsize; | |
1378 | struct inpcb *inp; | |
1379 | struct socket *so; | |
1380 | extern int tcp_mssdflt, tcp_rttdflt; | |
1381 | ||
1382 | inp = tp->t_inpcb; | |
1383 | ro = &inp->inp_route; | |
1384 | ||
1385 | if ((rt = ro->ro_rt) == (struct rtentry *)0) { | |
1386 | /* No route yet, so try to acquire one */ | |
1387 | if (inp->inp_faddr.s_addr != INADDR_ANY) { | |
1388 | ro->ro_dst.sa_family = AF_INET; | |
1389 | ro->ro_dst.sa_len = sizeof(ro->ro_dst); | |
1390 | ((struct sockaddr_in *) &ro->ro_dst)->sin_addr = | |
1391 | inp->inp_faddr; | |
1392 | rtalloc(ro); | |
1393 | } | |
1394 | if ((rt = ro->ro_rt) == (struct rtentry *)0) | |
1395 | return (tcp_mssdflt); | |
1396 | } | |
1397 | ifp = rt->rt_ifp; | |
1398 | so = inp->inp_socket; | |
1399 | ||
1400 | #ifdef RTV_MTU /* if route characteristics exist ... */ | |
1401 | /* | |
1402 | * While we're here, check if there's an initial rtt | |
1403 | * or rttvar. Convert from the route-table units | |
1404 | * to scaled multiples of the slow timeout timer. | |
1405 | */ | |
1406 | if (tp->t_srtt == 0 && (rtt = rt->rt_rmx.rmx_rtt)) { | |
1407 | if (rt->rt_rmx.rmx_locks & RTV_MTU) | |
1408 | tp->t_rttmin = rtt / (RTM_RTTUNIT / PR_SLOWHZ); | |
1409 | tp->t_srtt = rtt / (RTM_RTTUNIT / (PR_SLOWHZ * TCP_RTT_SCALE)); | |
1410 | if (rt->rt_rmx.rmx_rttvar) | |
1411 | tp->t_rttvar = rt->rt_rmx.rmx_rttvar / | |
1412 | (RTM_RTTUNIT / (PR_SLOWHZ * TCP_RTTVAR_SCALE)); | |
1413 | else | |
1414 | /* default variation is +- 1 rtt */ | |
1415 | tp->t_rttvar = | |
1416 | tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE; | |
1417 | TCPT_RANGESET(tp->t_rxtcur, | |
1418 | ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1, | |
1419 | tp->t_rttmin, TCPTV_REXMTMAX); | |
1420 | } | |
1421 | /* | |
1422 | * if there's an mtu associated with the route, use it | |
1423 | */ | |
1424 | if (rt->rt_rmx.rmx_mtu) | |
1425 | mss = rt->rt_rmx.rmx_mtu - sizeof(struct tcpiphdr); | |
1426 | else | |
1427 | #endif /* RTV_MTU */ | |
1428 | { | |
1429 | mss = ifp->if_mtu - sizeof(struct tcpiphdr); | |
1430 | #if (MCLBYTES & (MCLBYTES - 1)) == 0 | |
1431 | if (mss > MCLBYTES) | |
1432 | mss &= ~(MCLBYTES-1); | |
1433 | #else | |
1434 | if (mss > MCLBYTES) | |
1435 | mss = mss / MCLBYTES * MCLBYTES; | |
1436 | #endif | |
1437 | if (!in_localaddr(inp->inp_faddr)) | |
1438 | mss = min(mss, tcp_mssdflt); | |
1439 | } | |
1440 | /* | |
1441 | * The current mss, t_maxseg, is initialized to the default value. | |
1442 | * If we compute a smaller value, reduce the current mss. | |
1443 | * If we compute a larger value, return it for use in sending | |
1444 | * a max seg size option, but don't store it for use | |
1445 | * unless we received an offer at least that large from peer. | |
1446 | * However, do not accept offers under 32 bytes. | |
1447 | */ | |
1448 | if (offer) | |
1449 | mss = min(mss, offer); | |
1450 | mss = max(mss, 32); /* sanity */ | |
1451 | if (mss < tp->t_maxseg || offer != 0) { | |
1452 | /* | |
1453 | * If there's a pipesize, change the socket buffer | |
1454 | * to that size. Make the socket buffers an integral | |
1455 | * number of mss units; if the mss is larger than | |
1456 | * the socket buffer, decrease the mss. | |
1457 | */ | |
1458 | #ifdef RTV_SPIPE | |
1459 | if ((bufsize = rt->rt_rmx.rmx_sendpipe) == 0) | |
1460 | #endif | |
1461 | bufsize = so->so_snd.sb_hiwat; | |
1462 | if (bufsize < mss) | |
1463 | mss = bufsize; | |
1464 | else { | |
1465 | bufsize = min(bufsize, SB_MAX) / mss * mss; | |
1466 | (void) sbreserve(&so->so_snd, bufsize); | |
1467 | } | |
1468 | tp->t_maxseg = mss; | |
1469 | ||
1470 | #ifdef RTV_RPIPE | |
1471 | if ((bufsize = rt->rt_rmx.rmx_recvpipe) == 0) | |
1472 | #endif | |
1473 | bufsize = so->so_rcv.sb_hiwat; | |
1474 | if (bufsize > mss) { | |
1475 | bufsize = min(bufsize, SB_MAX) / mss * mss; | |
1476 | (void) sbreserve(&so->so_rcv, bufsize); | |
1477 | } | |
1478 | } | |
1479 | tp->snd_cwnd = mss; | |
1480 | ||
1481 | #ifdef RTV_SSTHRESH | |
1482 | if (rt->rt_rmx.rmx_ssthresh) { | |
1483 | /* | |
1484 | * There's some sort of gateway or interface | |
1485 | * buffer limit on the path. Use this to set | |
1486 | * the slow start threshhold, but set the | |
1487 | * threshold to no less than 2*mss. | |
1488 | */ | |
1489 | tp->snd_ssthresh = max(2 * mss, rt->rt_rmx.rmx_ssthresh); | |
1490 | } | |
1491 | #endif /* RTV_MTU */ | |
1492 | return (mss); | |
1493 | } |