Line data Source code
1 : /*
2 : * Copyright (c) 2016-2019 Cisco and/or its affiliates.
3 : * Licensed under the Apache License, Version 2.0 (the "License");
4 : * you may not use this file except in compliance with the License.
5 : * You may obtain a copy of the License at:
6 : *
7 : * http://www.apache.org/licenses/LICENSE-2.0
8 : *
9 : * Unless required by applicable law or agreed to in writing, software
10 : * distributed under the License is distributed on an "AS IS" BASIS,
11 : * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 : * See the License for the specific language governing permissions and
13 : * limitations under the License.
14 : */
15 :
16 : #include <vppinfra/sparse_vec.h>
17 : #include <vnet/fib/ip4_fib.h>
18 : #include <vnet/fib/ip6_fib.h>
19 : #include <vnet/tcp/tcp.h>
20 : #include <vnet/tcp/tcp_inlines.h>
21 : #include <vnet/session/session.h>
22 : #include <math.h>
23 :
24 : static vlib_error_desc_t tcp_input_error_counters[] = {
25 : #define tcp_error(f, n, s, d) { #n, d, VL_COUNTER_SEVERITY_##s },
26 : #include <vnet/tcp/tcp_error.def>
27 : #undef tcp_error
28 : };
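The descriptor table above is populated with an X-macro: while tcp_error.def is included, each tcp_error (f, n, s, d) entry expands to one vlib_error_desc_t initializer. A minimal standalone analogue of the pattern, with made-up entries since tcp_error.def itself is not part of this listing:

#define MY_ERROR_LIST                               \
  my_error (NONE, none, INFO, "no error")           \
  my_error (PAWS, paws, WARN, "PAWS check failed")

typedef struct { const char *name, *desc; int severity; } my_error_desc_t;
enum { MY_SEVERITY_INFO, MY_SEVERITY_WARN };

static my_error_desc_t my_error_descs[] = {
/* each list entry expands to { "none", "no error", MY_SEVERITY_INFO }, ... */
#define my_error(f, n, s, d) { #n, d, MY_SEVERITY_##s },
  MY_ERROR_LIST
#undef my_error
};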
29 :
30 : typedef enum _tcp_input_next
31 : {
32 : TCP_INPUT_NEXT_DROP,
33 : TCP_INPUT_NEXT_LISTEN,
34 : TCP_INPUT_NEXT_RCV_PROCESS,
35 : TCP_INPUT_NEXT_SYN_SENT,
36 : TCP_INPUT_NEXT_ESTABLISHED,
37 : TCP_INPUT_NEXT_RESET,
38 : TCP_INPUT_NEXT_PUNT,
39 : TCP_INPUT_N_NEXT
40 : } tcp_input_next_t;
41 :
42 : /**
43 : * Validate segment sequence number. As per RFC793:
44 : *
45 : * Segment Receive Test
46 : * Length Window
47 : * ------- ------- -------------------------------------------
48 : * 0 0 SEG.SEQ = RCV.NXT
49 : * 0 >0 RCV.NXT =< SEG.SEQ < RCV.NXT+RCV.WND
50 : * >0 0 not acceptable
51 : * >0 >0 RCV.NXT =< SEG.SEQ < RCV.NXT+RCV.WND
52 : * or RCV.NXT =< SEG.SEQ+SEG.LEN-1 < RCV.NXT+RCV.WND
53 : *
54 : * This ultimately consists of checking whether the segment falls within the window.
55 : * The one important difference compared to RFC793 is that we use rcv_las,
56 : * i.e., rcv_nxt at the last ack sent, instead of rcv_nxt, since that is the
57 : * peer's reference when computing our receive window.
58 : *
59 : * This:
60 : * seq_leq (end_seq, tc->rcv_las + tc->rcv_wnd) && seq_geq (seq, tc->rcv_las)
61 : * however, is too strict when we have retransmits. Instead we just check that
62 : * the seq is not beyond the right edge and that the end of the segment is not
63 : * less than the left edge.
64 : *
65 : * N.B. rcv_nxt and rcv_wnd are both updated in this node if acks are sent, so
66 : * use rcv_nxt in the right edge window test instead of rcv_las.
67 : *
68 : */
69 : always_inline u8
70 1062070 : tcp_segment_in_rcv_wnd (tcp_connection_t * tc, u32 seq, u32 end_seq)
71 : {
72 1062070 : return (seq_geq (end_seq, tc->rcv_las)
73 1062070 : && seq_leq (seq, tc->rcv_nxt + tc->rcv_wnd));
74 : }
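The window test above relies on wrap-safe sequence comparisons (seq_geq, seq_leq, etc.) that are defined elsewhere in VPP's tcp headers, not in this file. The standard technique, shown here purely for illustration, is a signed 32-bit difference so comparisons remain correct across sequence-number wraparound (the timestamp_lt/timestamp_leq helpers used below are analogous):

/* Sketch of wrap-safe sequence comparisons (illustrative, not VPP's code) */
#include <stdint.h>

static inline int seq_lt  (uint32_t a, uint32_t b) { return (int32_t) (a - b) <  0; }
static inline int seq_leq (uint32_t a, uint32_t b) { return (int32_t) (a - b) <= 0; }
static inline int seq_gt  (uint32_t a, uint32_t b) { return (int32_t) (a - b) >  0; }
static inline int seq_geq (uint32_t a, uint32_t b) { return (int32_t) (a - b) >= 0; }

/* e.g. with rcv_las = 0xffffff00 and a segment that wrapped to seq = 0x10,
 * seq_geq (0x10, 0xffffff00) still holds because 0x10 - 0xffffff00 = 0x110 > 0 */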
75 :
76 : /**
77 : * RFC1323: Check against wrapped sequence numbers (PAWS). If the segment
78 : * carries a timestamp and it is less than tsval_recent, drop the segment
79 : * but still send an ACK in order to retain TCP's mechanism for detecting
80 : * and recovering from half-open connections
81 : *
82 : * Or at least that's what the theory says. It seems that this might not work
83 : * very well with packet reordering and fast retransmit. XXX
84 : */
85 : always_inline int
86 1062070 : tcp_segment_check_paws (tcp_connection_t * tc)
87 : {
88 1062070 : return tcp_opts_tstamp (&tc->rcv_opts)
89 1062070 : && timestamp_lt (tc->rcv_opts.tsval, tc->tsval_recent);
90 : }
91 :
92 : /**
93 : * Update tsval recent
94 : */
95 : always_inline void
96 1062070 : tcp_update_timestamp (tcp_connection_t * tc, u32 seq, u32 seq_end)
97 : {
98 : /*
99 : * RFC1323: If Last.ACK.sent falls within the range of sequence numbers
100 : * of an incoming segment:
101 : * SEG.SEQ <= Last.ACK.sent < SEG.SEQ + SEG.LEN
102 : * then the TSval from the segment is copied to TS.Recent;
103 : * otherwise, the TSval is ignored.
104 : */
105 1062070 : if (tcp_opts_tstamp (&tc->rcv_opts) && seq_leq (seq, tc->rcv_las)
106 74390 : && seq_leq (tc->rcv_las, seq_end))
107 : {
108 74390 : ASSERT (timestamp_leq (tc->tsval_recent, tc->rcv_opts.tsval));
109 74390 : tc->tsval_recent = tc->rcv_opts.tsval;
110 74390 : tc->tsval_recent_age = tcp_time_tstamp (tc->c_thread_index);
111 : }
112 1062070 : }
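For clarity, a worked instance of the RFC1323 condition implemented above (values are illustrative):

/* With rcv_las = 1000:
 *   segment [900, 1100)  -> seq_leq (900, 1000) && seq_leq (1000, 1100)
 *                           both hold, so tsval_recent is updated
 *   segment [1200, 1300) -> seq_leq (1200, 1000) fails, so the TSval of
 *                           this out-of-order segment is ignored */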
113 :
114 : static void
115 4 : tcp_handle_rst (tcp_connection_t * tc)
116 : {
117 4 : switch (tc->rst_state)
118 : {
119 0 : case TCP_STATE_SYN_RCVD:
120 : /* Cleanup everything. App wasn't notified yet */
121 0 : session_transport_delete_notify (&tc->connection);
122 0 : tcp_connection_cleanup (tc);
123 0 : break;
124 0 : case TCP_STATE_SYN_SENT:
125 0 : session_stream_connect_notify (&tc->connection, SESSION_E_REFUSED);
126 0 : tcp_connection_cleanup (tc);
127 0 : break;
128 4 : case TCP_STATE_ESTABLISHED:
129 4 : session_transport_reset_notify (&tc->connection);
130 4 : session_transport_closed_notify (&tc->connection);
131 4 : break;
132 0 : case TCP_STATE_CLOSE_WAIT:
133 : case TCP_STATE_FIN_WAIT_1:
134 : case TCP_STATE_FIN_WAIT_2:
135 : case TCP_STATE_CLOSING:
136 : case TCP_STATE_LAST_ACK:
137 0 : session_transport_closed_notify (&tc->connection);
138 0 : break;
139 0 : case TCP_STATE_CLOSED:
140 : case TCP_STATE_TIME_WAIT:
141 0 : break;
142 4 : default:
143 : TCP_DBG ("reset state: %u", tc->state);
144 : }
145 4 : }
146 :
147 : static void
148 4 : tcp_program_reset_ntf (tcp_worker_ctx_t * wrk, tcp_connection_t * tc)
149 : {
150 4 : if (!tcp_disconnect_pending (tc))
151 : {
152 4 : tc->rst_state = tc->state;
153 4 : vec_add1 (wrk->pending_resets, tc->c_c_index);
154 4 : tcp_disconnect_pending_on (tc);
155 : }
156 4 : }
157 :
158 : /**
159 : * Handle reset packet
160 : *
161 : * Programs disconnect/reset notification that should be sent
162 : * later by calling @ref tcp_handle_disconnects
163 : */
164 : static void
165 4 : tcp_rcv_rst (tcp_worker_ctx_t * wrk, tcp_connection_t * tc)
166 : {
167 : TCP_EVT (TCP_EVT_RST_RCVD, tc);
168 4 : switch (tc->state)
169 : {
170 0 : case TCP_STATE_SYN_RCVD:
171 0 : tcp_program_reset_ntf (wrk, tc);
172 0 : tcp_connection_set_state (tc, TCP_STATE_CLOSED);
173 0 : break;
174 0 : case TCP_STATE_SYN_SENT:
175 : /* Do not program ntf because the connection is half-open */
176 0 : tc->rst_state = tc->state;
177 0 : tcp_handle_rst (tc);
178 0 : break;
179 4 : case TCP_STATE_ESTABLISHED:
180 4 : tcp_connection_timers_reset (tc);
181 4 : tcp_cong_recovery_off (tc);
182 4 : tcp_program_reset_ntf (wrk, tc);
183 4 : tcp_connection_set_state (tc, TCP_STATE_CLOSED);
184 4 : tcp_program_cleanup (wrk, tc);
185 4 : break;
186 0 : case TCP_STATE_CLOSE_WAIT:
187 : case TCP_STATE_FIN_WAIT_1:
188 : case TCP_STATE_FIN_WAIT_2:
189 : case TCP_STATE_CLOSING:
190 : case TCP_STATE_LAST_ACK:
191 0 : tcp_connection_timers_reset (tc);
192 0 : tcp_cong_recovery_off (tc);
193 0 : tcp_program_reset_ntf (wrk, tc);
194 : /* Make sure we mark the session as closed. In some states we may
195 : * still be trying to send data */
196 0 : tcp_connection_set_state (tc, TCP_STATE_CLOSED);
197 0 : tcp_program_cleanup (wrk, tc);
198 0 : break;
199 0 : case TCP_STATE_CLOSED:
200 : case TCP_STATE_TIME_WAIT:
201 0 : break;
202 4 : default:
203 : TCP_DBG ("reset state: %u", tc->state);
204 : }
205 4 : }
206 :
207 : /**
208 : * Validate incoming segment as per RFC793 p. 69 and RFC1323 p. 19
209 : *
210 : * It first verifies if segment has a wrapped sequence number (PAWS) and then
211 : * does the processing associated to the first four steps (ignoring security
212 : * and precedence): sequence number, rst bit and syn bit checks.
213 : *
214 : * @return 0 if the segment passes validation.
215 : */
216 : static int
217 1062070 : tcp_segment_validate (tcp_worker_ctx_t * wrk, tcp_connection_t * tc0,
218 : vlib_buffer_t * b0, tcp_header_t * th0, u32 * error0)
219 : {
220 : /* We could get a burst of RSTs interleaved with acks */
221 1062070 : if (PREDICT_FALSE (tc0->state == TCP_STATE_CLOSED))
222 : {
223 2 : tcp_send_reset (tc0);
224 2 : *error0 = TCP_ERROR_CONNECTION_CLOSED;
225 2 : goto error;
226 : }
227 :
228 1062070 : if (PREDICT_FALSE (!tcp_ack (th0) && !tcp_rst (th0) && !tcp_syn (th0)))
229 : {
230 0 : *error0 = TCP_ERROR_SEGMENT_INVALID;
231 0 : goto error;
232 : }
233 :
234 1062070 : if (PREDICT_FALSE (tcp_options_parse (th0, &tc0->rcv_opts, 0)))
235 : {
236 0 : *error0 = TCP_ERROR_OPTIONS;
237 0 : goto error;
238 : }
239 :
240 1062070 : if (PREDICT_FALSE (tcp_segment_check_paws (tc0)))
241 : {
242 0 : *error0 = TCP_ERROR_PAWS;
243 : TCP_EVT (TCP_EVT_PAWS_FAIL, tc0, vnet_buffer (b0)->tcp.seq_number,
244 : vnet_buffer (b0)->tcp.seq_end);
245 :
246 : /* If tsval_recent is more than 24 days (TCP_PAWS_IDLE) old it is
247 : * considered stale, so take the segment's tsval instead. */
248 0 : if (timestamp_lt (tc0->tsval_recent_age + TCP_PAWS_IDLE,
249 : tcp_time_tstamp (tc0->c_thread_index)))
250 : {
251 0 : tc0->tsval_recent = tc0->rcv_opts.tsval;
252 0 : clib_warning ("paws failed: 24-day old segment");
253 : }
254 : /* Drop after ack if not rst. Resets can fail paws check as per
255 : * RFC 7323 sec. 5.2: When an <RST> segment is received, it MUST NOT
256 : * be subjected to the PAWS check by verifying an acceptable value in
257 : * SEG.TSval */
258 0 : else if (!tcp_rst (th0))
259 : {
260 0 : tcp_program_ack (tc0);
261 : TCP_EVT (TCP_EVT_DUPACK_SENT, tc0, vnet_buffer (b0)->tcp);
262 0 : goto error;
263 : }
264 : }
265 :
266 : /* 1st: check sequence number */
267 1062070 : if (!tcp_segment_in_rcv_wnd (tc0, vnet_buffer (b0)->tcp.seq_number,
268 1062070 : vnet_buffer (b0)->tcp.seq_end))
269 : {
270 : /* SYN/SYN-ACK retransmit */
271 0 : if (tcp_syn (th0)
272 0 : && vnet_buffer (b0)->tcp.seq_number == tc0->rcv_nxt - 1)
273 : {
274 0 : tcp_options_parse (th0, &tc0->rcv_opts, 1);
275 0 : if (tc0->state == TCP_STATE_SYN_RCVD)
276 : {
277 0 : tcp_send_synack (tc0);
278 : TCP_EVT (TCP_EVT_SYN_RCVD, tc0, 0);
279 0 : *error0 = TCP_ERROR_SYNS_RCVD;
280 : }
281 : else
282 : {
283 0 : tcp_program_ack (tc0);
284 : TCP_EVT (TCP_EVT_SYNACK_RCVD, tc0);
285 0 : *error0 = TCP_ERROR_SYN_ACKS_RCVD;
286 : }
287 0 : goto error;
288 : }
289 :
290 : /* If our window is 0 and the packet is in sequence, let it pass
291 : * through for ack processing. It should be dropped later. */
292 0 : if (tc0->rcv_wnd < tc0->snd_mss
293 0 : && tc0->rcv_nxt == vnet_buffer (b0)->tcp.seq_number)
294 0 : goto check_reset;
295 :
296 : /* If we entered recovery and peer did so as well, there's a chance that
297 : * dup acks won't be acceptable on either end because seq_end may be less
298 : * than rcv_las. This can happen if acks are lost in both directions. */
299 0 : if (tcp_in_recovery (tc0)
300 0 : && seq_geq (vnet_buffer (b0)->tcp.seq_number,
301 : tc0->rcv_las - tc0->rcv_wnd)
302 0 : && seq_leq (vnet_buffer (b0)->tcp.seq_end,
303 : tc0->rcv_nxt + tc0->rcv_wnd))
304 0 : goto check_reset;
305 :
306 0 : *error0 = TCP_ERROR_RCV_WND;
307 :
308 : /* If we advertised a zero rcv_wnd and the segment is in the past or the
309 : * next one that we expect, it is probably a window probe */
310 0 : if ((tc0->flags & TCP_CONN_ZERO_RWND_SENT)
311 0 : && seq_lt (vnet_buffer (b0)->tcp.seq_end,
312 : tc0->rcv_las + tc0->rcv_opts.mss))
313 0 : *error0 = TCP_ERROR_ZERO_RWND;
314 :
315 0 : tc0->errors.below_data_wnd += seq_lt (vnet_buffer (b0)->tcp.seq_end,
316 : tc0->rcv_las);
317 :
318 : /* If not RST, send dup ack */
319 0 : if (!tcp_rst (th0))
320 : {
321 0 : tcp_program_dupack (tc0);
322 : TCP_EVT (TCP_EVT_DUPACK_SENT, tc0, vnet_buffer (b0)->tcp);
323 : }
324 0 : goto error;
325 :
326 1062070 : check_reset:
327 : ;
328 : }
329 :
330 : /* 2nd: check the RST bit */
331 1062070 : if (PREDICT_FALSE (tcp_rst (th0)))
332 : {
333 4 : tcp_rcv_rst (wrk, tc0);
334 4 : *error0 = TCP_ERROR_RST_RCVD;
335 4 : goto error;
336 : }
337 :
338 : /* 3rd: check security and precedence (skip) */
339 :
340 : /* 4th: check the SYN bit (in window) */
341 1062070 : if (PREDICT_FALSE (tcp_syn (th0)))
342 : {
343 : /* As per RFC5961 send challenge ack instead of reset */
344 0 : tcp_program_ack (tc0);
345 0 : *error0 = TCP_ERROR_SPURIOUS_SYN;
346 0 : goto error;
347 : }
348 :
349 : /* If segment in window, save timestamp */
350 1062070 : tcp_update_timestamp (tc0, vnet_buffer (b0)->tcp.seq_number,
351 1062070 : vnet_buffer (b0)->tcp.seq_end);
352 1062070 : return 0;
353 :
354 6 : error:
355 6 : return -1;
356 : }
357 :
358 : always_inline int
359 387 : tcp_rcv_ack_no_cc (tcp_connection_t * tc, vlib_buffer_t * b, u32 * error)
360 : {
361 : /* SND.UNA =< SEG.ACK =< SND.NXT */
362 387 : if (!(seq_leq (tc->snd_una, vnet_buffer (b)->tcp.ack_number)
363 387 : && seq_leq (vnet_buffer (b)->tcp.ack_number, tc->snd_nxt)))
364 : {
365 0 : if (seq_leq (vnet_buffer (b)->tcp.ack_number, tc->snd_nxt)
366 0 : && seq_gt (vnet_buffer (b)->tcp.ack_number, tc->snd_una))
367 : {
368 0 : tc->snd_nxt = vnet_buffer (b)->tcp.ack_number;
369 0 : goto acceptable;
370 : }
371 0 : *error = TCP_ERROR_ACK_INVALID;
372 0 : return -1;
373 : }
374 :
375 387 : acceptable:
376 387 : tc->bytes_acked = vnet_buffer (b)->tcp.ack_number - tc->snd_una;
377 387 : tc->snd_una = vnet_buffer (b)->tcp.ack_number;
378 387 : *error = TCP_ERROR_ACK_OK;
379 387 : return 0;
380 : }
381 :
382 : /**
383 : * Compute smoothed RTT as per VJ's '88 SIGCOMM and RFC6298
384 : *
385 : * Note that although in the original article srtt and rttvar are scaled
386 : * to minimize round-off errors, here we don't. Instead, we rely on
387 : * better precision time measurements.
388 : *
389 : * A known limitation of the algorithm is that a drop in rtt results in an
390 : * rttvar increase and a bigger RTO.
391 : *
392 : * mrtt must be provided in @ref TCP_TICK multiples, i.e., in us. Note that
393 : * timestamps are measured as ms ticks so they must be converted before
394 : * calling this function.
395 : */
396 : static void
397 41698 : tcp_estimate_rtt (tcp_connection_t * tc, u32 mrtt)
398 : {
399 : int err, diff;
400 :
401 41698 : err = mrtt - tc->srtt;
402 41698 : tc->srtt = clib_max ((int) tc->srtt + (err >> 3), 1);
403 41698 : diff = (clib_abs (err) - (int) tc->rttvar) >> 2;
404 41698 : tc->rttvar = clib_max ((int) tc->rttvar + diff, 1);
405 41698 : }
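To make the header comment's caveat concrete, here is a worked pass through the update above (all values in us, purely illustrative):

/* srtt = 8000, rttvar = 1000, new sample mrtt = 4000:
 *   err    = 4000 - 8000         = -4000
 *   srtt   = 8000 + (-4000 >> 3) = 7500   (srtt   <- srtt + err/8)
 *   diff   = (4000 - 1000) >> 2  = 750
 *   rttvar = 1000 + 750          = 1750   (rttvar <- 3/4 rttvar + |err|/4)
 * The sample was faster than srtt, yet rttvar still grows; this is the
 * limitation called out above: a drop in rtt inflates rttvar and the rto. */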
406 :
407 : static inline void
408 28749 : tcp_estimate_rtt_us (tcp_connection_t * tc, f64 mrtt)
409 : {
410 28749 : tc->mrtt_us = tc->mrtt_us + (mrtt - tc->mrtt_us) * 0.125;
411 28749 : }
412 :
413 : /**
414 : * Update rtt estimate
415 : *
416 : * We have potentially three sources of rtt measurements:
417 : *
418 : * TSOPT difference between current and echoed timestamp. It has ms
419 : * precision and can be computed per ack
420 : * ACK timing one sequence number is tracked per rtt with us (micro second)
421 : * precision.
422 : * rate sample if enabled, all outstanding bytes are tracked with us
423 : * precision. Every ack and sack is an rtt sample
424 : *
425 : * Middle boxes are known to fiddle with TCP options so we give higher
426 : * priority to ACK timing.
427 : *
428 : * For now, rate sample rtts are only used under congestion.
429 : */
430 : static int
431 41698 : tcp_update_rtt (tcp_connection_t * tc, tcp_rate_sample_t * rs, u32 ack)
432 : {
433 41698 : u32 mrtt = 0;
434 :
435 : /* Karn's rule, part 1. Don't use retransmitted segments to estimate
436 : * RTT because they're ambiguous. */
437 41698 : if (tcp_in_cong_recovery (tc))
438 : {
439 : /* Accept rtt estimates for samples that have not been retransmitted */
440 0 : if (!(tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
441 0 : || (rs->flags & TCP_BTS_IS_RXT))
442 0 : goto done;
443 0 : if (rs->rtt_time)
444 0 : tcp_estimate_rtt_us (tc, rs->rtt_time);
445 0 : mrtt = rs->rtt_time * THZ;
446 0 : goto estimate_rtt;
447 : }
448 :
449 41698 : if (tc->rtt_ts && seq_geq (ack, tc->rtt_seq))
450 28749 : {
451 28749 : f64 sample = tcp_time_now_us (tc->c_thread_index) - tc->rtt_ts;
452 28749 : tcp_estimate_rtt_us (tc, sample);
453 28749 : mrtt = clib_max ((u32) (sample * THZ), 1);
454 : /* Allow measuring of a new RTT */
455 28749 : tc->rtt_ts = 0;
456 : }
457 : /* As per RFC7323 TSecr can be used for RTTM only if the segment advances
458 : * snd_una, i.e., the left side of the send window:
459 : * seq_lt (tc->snd_una, ack). This is a condition for calling update_rtt */
460 12949 : else if (tcp_opts_tstamp (&tc->rcv_opts) && tc->rcv_opts.tsecr)
461 : {
462 12949 : mrtt = clib_max (tcp_tstamp (tc) - tc->rcv_opts.tsecr, 1);
463 12949 : mrtt *= TCP_TSTP_TO_HZ;
464 : }
465 :
466 0 : estimate_rtt:
467 :
468 : /* Ignore dubious measurements */
469 41698 : if (mrtt == 0 || mrtt > TCP_RTT_MAX)
470 0 : goto done;
471 :
472 41698 : tcp_estimate_rtt (tc, mrtt);
473 :
474 41698 : done:
475 :
476 : /* If we got here something must've been ACKed so make sure boff is 0,
477 : * even if mrtt is not valid, since the rto is updated below */
478 41698 : tc->rto_boff = 0;
479 41698 : tcp_update_rto (tc);
480 :
481 41698 : return 0;
482 : }
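tcp_update_rto itself is not part of this listing. RFC 6298 derives the timeout as RTO = SRTT + max (G, 4 * RTTVAR), clamped to configured bounds; the sketch below illustrates that rule only and is an assumption about the helper, not VPP's actual implementation:

/* Sketch only: RFC 6298 timeout from the smoothed estimates kept above */
static inline unsigned int
rto_from_rtt_sketch (unsigned int srtt, unsigned int rttvar,
		     unsigned int rto_min, unsigned int rto_max)
{
  unsigned int rto = srtt + 4 * rttvar;	/* K = 4 per RFC 6298 */
  if (rto < rto_min)
    rto = rto_min;
  if (rto > rto_max)
    rto = rto_max;
  return rto;
}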
483 :
484 : static void
485 264 : tcp_estimate_initial_rtt (tcp_connection_t * tc)
486 : {
487 264 : u8 thread_index = vlib_num_workers ()? 1 : 0;
488 : int mrtt;
489 :
490 264 : if (tc->rtt_ts)
491 : {
492 264 : tc->mrtt_us = tcp_time_now_us (thread_index) - tc->rtt_ts;
493 264 : tc->mrtt_us = clib_max (tc->mrtt_us, 0.0001);
494 264 : mrtt = clib_max ((u32) (tc->mrtt_us * THZ), 1);
495 264 : tc->rtt_ts = 0;
496 : }
497 : else
498 : {
499 0 : mrtt = tcp_tstamp (tc) - tc->rcv_opts.tsecr;
500 0 : mrtt = clib_max (mrtt, 1) * TCP_TSTP_TO_HZ;
501 : /* Due to retransmits we don't know the initial mrtt */
502 0 : if (tc->rto_boff && mrtt > 1 * THZ)
503 0 : mrtt = 1 * THZ;
504 0 : tc->mrtt_us = (f64) mrtt *TCP_TICK;
505 : }
506 :
507 264 : if (mrtt > 0 && mrtt < TCP_RTT_MAX)
508 : {
509 : /* First measurement as per RFC 6298 */
510 264 : tc->srtt = mrtt;
511 264 : tc->rttvar = mrtt >> 1;
512 : }
513 264 : tcp_update_rto (tc);
514 264 : }
515 :
516 : /**
517 : * Dequeue bytes for connections that have received acks in last burst
518 : */
519 : static void
520 38104 : tcp_handle_postponed_dequeues (tcp_worker_ctx_t * wrk)
521 : {
522 38104 : u32 thread_index = wrk->vm->thread_index;
523 : u32 *pending_deq_acked;
524 : tcp_connection_t *tc;
525 : int i;
526 :
527 38104 : if (!vec_len (wrk->pending_deq_acked))
528 256 : return;
529 :
530 37848 : pending_deq_acked = wrk->pending_deq_acked;
531 79546 : for (i = 0; i < vec_len (pending_deq_acked); i++)
532 : {
533 41698 : tc = tcp_connection_get (pending_deq_acked[i], thread_index);
534 41698 : tc->flags &= ~TCP_CONN_DEQ_PENDING;
535 :
536 41698 : if (PREDICT_FALSE (!tc->burst_acked))
537 127 : continue;
538 :
539 : /* Dequeue the newly ACKed bytes */
540 41571 : session_tx_fifo_dequeue_drop (&tc->connection, tc->burst_acked);
541 41571 : tcp_validate_txf_size (tc, tc->snd_nxt - tc->snd_una);
542 :
543 41571 : if (tcp_is_descheduled (tc))
544 8 : tcp_reschedule (tc);
545 :
546 : /* If everything has been acked, stop the retransmit timer,
547 : * otherwise update it. */
548 41571 : tcp_retransmit_timer_update (&wrk->timer_wheel, tc);
549 :
550 : /* Update pacer based on our new cwnd estimate */
551 41571 : tcp_connection_tx_pacer_update (tc);
552 :
553 41571 : tc->burst_acked = 0;
554 : }
555 37848 : vec_set_len (wrk->pending_deq_acked, 0);
556 : }
557 :
558 : static void
559 41698 : tcp_program_dequeue (tcp_worker_ctx_t * wrk, tcp_connection_t * tc)
560 : {
561 41698 : if (!(tc->flags & TCP_CONN_DEQ_PENDING))
562 : {
563 41698 : vec_add1 (wrk->pending_deq_acked, tc->c_c_index);
564 41698 : tc->flags |= TCP_CONN_DEQ_PENDING;
565 : }
566 41698 : tc->burst_acked += tc->bytes_acked;
567 41698 : }
568 :
569 : /**
570 : * Try to update snd_wnd based on feedback received from peer.
571 : *
572 : * If successful, and new window is 'effectively' 0, activate persist
573 : * timer.
574 : */
575 : static void
576 1061680 : tcp_update_snd_wnd (tcp_connection_t * tc, u32 seq, u32 ack, u32 snd_wnd)
577 : {
578 : /* If (SND.WL1 < SEG.SEQ or (SND.WL1 = SEG.SEQ and SND.WL2 =< SEG.ACK)), set
579 : * SND.WND <- SEG.WND, set SND.WL1 <- SEG.SEQ, and set SND.WL2 <- SEG.ACK */
580 1061680 : if (seq_lt (tc->snd_wl1, seq)
581 32547 : || (tc->snd_wl1 == seq && seq_leq (tc->snd_wl2, ack)))
582 : {
583 1061680 : tc->snd_wnd = snd_wnd;
584 1061680 : tc->snd_wl1 = seq;
585 1061680 : tc->snd_wl2 = ack;
586 : TCP_EVT (TCP_EVT_SND_WND, tc);
587 :
588 1061680 : if (PREDICT_FALSE (tc->snd_wnd < tc->snd_mss))
589 : {
590 0 : if (!tcp_timer_is_active (tc, TCP_TIMER_RETRANSMIT))
591 : {
592 0 : tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
593 :
594 : /* Set persist timer if we just got 0 wnd. If already set,
595 : * update it because some data sent with snd_wnd < snd_mss was
596 : * acked. */
597 0 : if (tcp_timer_is_active (tc, TCP_TIMER_PERSIST))
598 0 : tcp_persist_timer_reset (&wrk->timer_wheel, tc);
599 0 : tcp_persist_timer_set (&wrk->timer_wheel, tc);
600 : }
601 : }
602 : else
603 : {
604 1061680 : if (PREDICT_FALSE (tcp_timer_is_active (tc, TCP_TIMER_PERSIST)))
605 : {
606 0 : tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
607 0 : tcp_persist_timer_reset (&wrk->timer_wheel, tc);
608 : }
609 :
610 1061680 : if (PREDICT_FALSE (tcp_is_descheduled (tc)))
611 48192 : tcp_reschedule (tc);
612 :
613 1061680 : if (PREDICT_FALSE (!tcp_in_recovery (tc) && tc->rto_boff > 0))
614 : {
615 0 : tc->rto_boff = 0;
616 0 : tcp_update_rto (tc);
617 : }
618 : }
619 : }
620 1061680 : }
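The SND.WL1/SND.WL2 test above guards against reordered ACKs reverting the send window; a small illustrative scenario:

/* Two ACKs from the peer are reordered in flight:
 *   ACK A (newer): seq = 100, ack = 50, wnd = 4000
 *   ACK B (older): seq =  90, ack = 50, wnd = 1000
 * A arrives first: window updated, snd_wl1 = 100, snd_wl2 = 50.
 * B arrives next: seq_lt (100, 90) fails and snd_wl1 != 90, so the
 * stale wnd = 1000 does not overwrite the fresher 4000. */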
621 :
622 : /**
623 : * Init loss recovery/fast recovery.
624 : *
625 : * Triggered by dup acks as opposed to timer timeout. Note that cwnd is
626 : * updated in @ref tcp_cc_handle_event after fast retransmit
627 : */
628 : static void
629 0 : tcp_cc_init_congestion (tcp_connection_t * tc)
630 : {
631 0 : tcp_fastrecovery_on (tc);
632 0 : tc->snd_congestion = tc->snd_nxt;
633 0 : tc->cwnd_acc_bytes = 0;
634 0 : tc->snd_rxt_bytes = 0;
635 0 : tc->rxt_delivered = 0;
636 0 : tc->prr_delivered = 0;
637 0 : tc->prr_start = tc->snd_una;
638 0 : tc->prev_ssthresh = tc->ssthresh;
639 0 : tc->prev_cwnd = tc->cwnd;
640 :
641 0 : tc->snd_rxt_ts = tcp_tstamp (tc);
642 0 : tcp_cc_congestion (tc);
643 :
644 : /* Post retransmit update cwnd to ssthresh and account for the
645 : * three segments that have left the network and should've been
646 : * buffered at the receiver XXX */
647 0 : if (!tcp_opts_sack_permitted (&tc->rcv_opts))
648 0 : tc->cwnd += TCP_DUPACK_THRESHOLD * tc->snd_mss;
649 :
650 0 : tc->fr_occurences += 1;
651 : TCP_EVT (TCP_EVT_CC_EVT, tc, 4);
652 0 : }
653 :
654 : static void
655 0 : tcp_cc_congestion_undo (tcp_connection_t * tc)
656 : {
657 0 : tc->cwnd = tc->prev_cwnd;
658 0 : tc->ssthresh = tc->prev_ssthresh;
659 0 : tcp_cc_undo_recovery (tc);
660 0 : ASSERT (tc->rto_boff == 0);
661 : TCP_EVT (TCP_EVT_CC_EVT, tc, 5);
662 0 : }
663 :
664 : static inline u8
665 0 : tcp_cc_is_spurious_timeout_rxt (tcp_connection_t * tc)
666 : {
667 0 : return (tcp_in_recovery (tc) && tc->rto_boff == 1
668 0 : && tc->snd_rxt_ts
669 0 : && tcp_opts_tstamp (&tc->rcv_opts)
670 0 : && timestamp_lt (tc->rcv_opts.tsecr, tc->snd_rxt_ts));
671 : }
672 :
673 : static inline u8
674 0 : tcp_cc_is_spurious_retransmit (tcp_connection_t * tc)
675 : {
676 0 : return (tcp_cc_is_spurious_timeout_rxt (tc));
677 : }
678 :
679 : static inline u8
680 0 : tcp_should_fastrecover (tcp_connection_t * tc, u8 has_sack)
681 : {
682 0 : if (!has_sack)
683 : {
684 : /* If either of the two conditions below holds, reset dupacks because
685 : * we're probably after timeout (RFC6582 heuristics).
686 : * If Cumulative ack does not cover more than congestion threshold,
687 : * and:
688 : * 1) The following doesn't hold: The congestion window is greater
689 : * than SMSS bytes and the difference between highest_ack
690 : * and prev_highest_ack is at most 4*SMSS bytes
691 : * 2) Echoed timestamp in the last non-dup ack does not equal the
692 : * stored timestamp
693 : */
694 0 : if (seq_leq (tc->snd_una, tc->snd_congestion)
695 0 : && ((!(tc->cwnd > tc->snd_mss
696 0 : && tc->bytes_acked <= 4 * tc->snd_mss))
697 0 : || (tc->rcv_opts.tsecr != tc->tsecr_last_ack)))
698 : {
699 0 : tc->rcv_dupacks = 0;
700 0 : return 0;
701 : }
702 : }
703 0 : return tc->sack_sb.lost_bytes || tc->rcv_dupacks >= tc->sack_sb.reorder;
704 : }
705 :
706 : static int
707 0 : tcp_cc_try_recover (tcp_connection_t *tc)
708 : {
709 : sack_scoreboard_hole_t *hole;
710 0 : u8 is_spurious = 0;
711 :
712 0 : ASSERT (tcp_in_cong_recovery (tc));
713 :
714 0 : if (tcp_cc_is_spurious_retransmit (tc))
715 : {
716 0 : tcp_cc_congestion_undo (tc);
717 0 : is_spurious = 1;
718 : }
719 :
720 0 : tcp_connection_tx_pacer_reset (tc, tc->cwnd, 0 /* start bucket */ );
721 0 : tc->rcv_dupacks = 0;
722 0 : tcp_recovery_off (tc);
723 :
724 : /* Previous recovery left us congested. Continue sending as part
725 : * of the current recovery event with an updated snd_congestion */
726 0 : if (tc->sack_sb.sacked_bytes && tcp_in_fastrecovery (tc))
727 : {
728 0 : tc->snd_congestion = tc->snd_nxt;
729 0 : return -1;
730 : }
731 :
732 0 : tc->rxt_delivered = 0;
733 0 : tc->snd_rxt_bytes = 0;
734 0 : tc->snd_rxt_ts = 0;
735 0 : tc->prr_delivered = 0;
736 0 : tc->rtt_ts = 0;
737 0 : tc->flags &= ~TCP_CONN_RXT_PENDING;
738 :
739 0 : hole = scoreboard_first_hole (&tc->sack_sb);
740 0 : if (hole && hole->start == tc->snd_una && hole->end == tc->snd_nxt)
741 0 : scoreboard_clear (&tc->sack_sb);
742 :
743 0 : if (tcp_in_fastrecovery (tc) && !is_spurious)
744 0 : tcp_cc_recovered (tc);
745 :
746 0 : tcp_fastrecovery_off (tc);
747 0 : tcp_fastrecovery_first_off (tc);
748 : TCP_EVT (TCP_EVT_CC_EVT, tc, 3);
749 :
750 0 : ASSERT (tc->rto_boff == 0);
751 0 : ASSERT (!tcp_in_cong_recovery (tc));
752 0 : ASSERT (tcp_scoreboard_is_sane_post_recovery (tc));
753 :
754 0 : return 0;
755 : }
756 :
757 : static void
758 1061680 : tcp_cc_update (tcp_connection_t * tc, tcp_rate_sample_t * rs)
759 : {
760 1061680 : ASSERT (!tcp_in_cong_recovery (tc) || tcp_is_lost_fin (tc));
761 :
762 : /* Congestion avoidance */
763 1061680 : tcp_cc_rcv_ack (tc, rs);
764 :
765 : /* If a cumulative ack, make sure dupacks is 0 */
766 1061680 : tc->rcv_dupacks = 0;
767 1061680 : }
768 :
769 : /**
770 : * One function to rule them all ... and in the darkness bind them
771 : */
772 : static void
773 1 : tcp_cc_handle_event (tcp_connection_t * tc, tcp_rate_sample_t * rs,
774 : u32 is_dack)
775 : {
776 1 : u8 has_sack = tcp_opts_sack_permitted (&tc->rcv_opts);
777 :
778 : /* If reneging, wait for timer based retransmits */
779 1 : if (PREDICT_FALSE (tcp_is_lost_fin (tc) || tc->sack_sb.is_reneging))
780 1 : return;
781 :
782 : /*
783 : * If not in recovery, figure out if we should enter
784 : */
785 0 : if (!tcp_in_cong_recovery (tc))
786 : {
787 0 : ASSERT (is_dack);
788 :
789 0 : tc->rcv_dupacks++;
790 : TCP_EVT (TCP_EVT_DUPACK_RCVD, tc, 1);
791 0 : tcp_cc_rcv_cong_ack (tc, TCP_CC_DUPACK, rs);
792 :
793 0 : if (tcp_should_fastrecover (tc, has_sack))
794 : {
795 0 : tcp_cc_init_congestion (tc);
796 :
797 0 : if (has_sack)
798 0 : scoreboard_init_rxt (&tc->sack_sb, tc->snd_una);
799 :
800 0 : tcp_connection_tx_pacer_reset (tc, tc->cwnd, 0 /* start bucket */ );
801 0 : tcp_program_retransmit (tc);
802 : }
803 :
804 0 : return;
805 : }
806 :
807 : /*
808 : * Already in recovery
809 : */
810 :
811 : /*
812 : * See if we can exit and stop retransmitting
813 : */
814 0 : if (seq_geq (tc->snd_una, tc->snd_congestion))
815 : {
816 : /* If successfully recovered, treat ack as congestion avoidance ack
817 : * and return. Otherwise, we're still congested so process feedback */
818 0 : if (!tcp_cc_try_recover (tc))
819 : {
820 0 : tcp_cc_rcv_ack (tc, rs);
821 0 : return;
822 : }
823 : }
824 :
825 : /*
826 : * Process (re)transmit feedback. Output path uses this to decide how much
827 : * more data to release into the network
828 : */
829 0 : if (has_sack)
830 : {
831 0 : if (!tc->bytes_acked && tc->sack_sb.rxt_sacked)
832 0 : tcp_fastrecovery_first_on (tc);
833 :
834 0 : tc->rxt_delivered += tc->sack_sb.rxt_sacked;
835 0 : tc->prr_delivered += rs->delivered;
836 : }
837 : else
838 : {
839 0 : if (is_dack)
840 : {
841 0 : tc->rcv_dupacks += 1;
842 : TCP_EVT (TCP_EVT_DUPACK_RCVD, tc, 1);
843 : }
844 0 : tc->rxt_delivered = clib_min (tc->rxt_delivered + tc->bytes_acked,
845 : tc->snd_rxt_bytes);
846 0 : if (is_dack)
847 0 : tc->prr_delivered += clib_min (tc->snd_mss,
848 : tc->snd_nxt - tc->snd_una);
849 : else
850 0 : tc->prr_delivered += tc->bytes_acked - clib_min (tc->bytes_acked,
851 : tc->snd_mss *
852 : tc->rcv_dupacks);
853 :
854 : /* If partial ack, assume that the first un-acked segment was lost */
855 0 : if (tc->bytes_acked || tc->rcv_dupacks == TCP_DUPACK_THRESHOLD)
856 0 : tcp_fastrecovery_first_on (tc);
857 : }
858 :
859 0 : tcp_program_retransmit (tc);
860 :
861 : /*
862 : * Notify cc of the event
863 : */
864 :
865 0 : if (!tc->bytes_acked)
866 : {
867 0 : tcp_cc_rcv_cong_ack (tc, TCP_CC_DUPACK, rs);
868 0 : return;
869 : }
870 :
871 : /* RFC6675: If the incoming ACK is a cumulative acknowledgment,
872 : * reset dupacks to 0. Also needed if in congestion recovery */
873 0 : tc->rcv_dupacks = 0;
874 :
875 0 : if (tcp_in_recovery (tc))
876 0 : tcp_cc_rcv_ack (tc, rs);
877 : else
878 0 : tcp_cc_rcv_cong_ack (tc, TCP_CC_PARTIALACK, rs);
879 : }
880 :
881 : static void
882 0 : tcp_handle_old_ack (tcp_connection_t * tc, tcp_rate_sample_t * rs)
883 : {
884 0 : if (!tcp_in_cong_recovery (tc))
885 0 : return;
886 :
887 0 : if (tcp_opts_sack_permitted (&tc->rcv_opts))
888 0 : tcp_rcv_sacks (tc, tc->snd_una);
889 :
890 0 : tc->bytes_acked = 0;
891 :
892 0 : if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
893 0 : tcp_bt_sample_delivery_rate (tc, rs);
894 :
895 0 : tcp_cc_handle_event (tc, rs, 1);
896 : }
897 :
898 : /**
899 : * Check if duplicate ack as per RFC5681 Sec. 2
900 : */
901 : always_inline u8
902 1061680 : tcp_ack_is_dupack (tcp_connection_t * tc, vlib_buffer_t * b, u32 prev_snd_wnd,
903 : u32 prev_snd_una)
904 : {
905 1061680 : return ((vnet_buffer (b)->tcp.ack_number == prev_snd_una)
906 1019980 : && seq_gt (tc->snd_nxt, tc->snd_una)
907 32752 : && (vnet_buffer (b)->tcp.seq_end == vnet_buffer (b)->tcp.seq_number)
908 2081660 : && (prev_snd_wnd == tc->snd_wnd));
909 : }
910 :
911 : /**
912 : * Checks if ack is a congestion control event.
913 : */
914 : static u8
915 1061680 : tcp_ack_is_cc_event (tcp_connection_t * tc, vlib_buffer_t * b,
916 : u32 prev_snd_wnd, u32 prev_snd_una, u8 * is_dack)
917 : {
918 : /* Check if ack is duplicate. Per RFC 6675, ACKs that SACK new data are
919 : * defined to be 'duplicate' as well */
920 2123360 : *is_dack = tc->sack_sb.last_sacked_bytes
921 1061680 : || tcp_ack_is_dupack (tc, b, prev_snd_wnd, prev_snd_una);
922 :
923 1061680 : return (*is_dack || tcp_in_cong_recovery (tc));
924 : }
925 :
926 : /**
927 : * Process incoming ACK
928 : */
929 : static int
930 1061680 : tcp_rcv_ack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc, vlib_buffer_t * b,
931 : tcp_header_t * th, u32 * error)
932 : {
933 : u32 prev_snd_wnd, prev_snd_una;
934 1061680 : tcp_rate_sample_t rs = { 0 };
935 : u8 is_dack;
936 :
937 : TCP_EVT (TCP_EVT_CC_STAT, tc);
938 :
939 : /* If the ACK acks something not yet sent (SEG.ACK > SND.NXT) */
940 1061680 : if (PREDICT_FALSE (seq_gt (vnet_buffer (b)->tcp.ack_number, tc->snd_nxt)))
941 : {
942 : /* We've probably entered recovery and the peer still has some
943 : * of the data we've sent. Update snd_nxt and accept the ack */
944 0 : if (seq_leq (vnet_buffer (b)->tcp.ack_number, tc->snd_nxt)
945 0 : && seq_gt (vnet_buffer (b)->tcp.ack_number, tc->snd_una))
946 : {
947 0 : tc->snd_nxt = vnet_buffer (b)->tcp.ack_number;
948 0 : goto process_ack;
949 : }
950 :
951 0 : tc->errors.above_ack_wnd += 1;
952 0 : *error = TCP_ERROR_ACK_FUTURE;
953 : TCP_EVT (TCP_EVT_ACK_RCV_ERR, tc, 0, vnet_buffer (b)->tcp.ack_number);
954 0 : return -1;
955 : }
956 :
957 : /* If old ACK, probably it's an old dupack */
958 1061680 : if (PREDICT_FALSE (seq_lt (vnet_buffer (b)->tcp.ack_number, tc->snd_una)))
959 : {
960 0 : tc->errors.below_ack_wnd += 1;
961 0 : *error = TCP_ERROR_ACK_OLD;
962 : TCP_EVT (TCP_EVT_ACK_RCV_ERR, tc, 1, vnet_buffer (b)->tcp.ack_number);
963 :
964 0 : if (seq_lt (vnet_buffer (b)->tcp.ack_number, tc->snd_una - tc->rcv_wnd))
965 0 : return -1;
966 :
967 0 : tcp_handle_old_ack (tc, &rs);
968 :
969 : /* Don't drop yet */
970 0 : return 0;
971 : }
972 :
973 1061680 : process_ack:
974 :
975 : /*
976 : * Looks okay, process feedback
977 : */
978 :
979 1061680 : if (tcp_opts_sack_permitted (&tc->rcv_opts))
980 1061680 : tcp_rcv_sacks (tc, vnet_buffer (b)->tcp.ack_number);
981 :
982 1061680 : prev_snd_wnd = tc->snd_wnd;
983 1061680 : prev_snd_una = tc->snd_una;
984 1061680 : tcp_update_snd_wnd (tc, vnet_buffer (b)->tcp.seq_number,
985 1061680 : vnet_buffer (b)->tcp.ack_number,
986 1061680 : clib_net_to_host_u16 (th->window) << tc->snd_wscale);
987 1061680 : tc->bytes_acked = vnet_buffer (b)->tcp.ack_number - tc->snd_una;
988 1061680 : tc->snd_una = vnet_buffer (b)->tcp.ack_number;
989 1061680 : tcp_validate_txf_size (tc, tc->bytes_acked);
990 :
991 1061680 : if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
992 0 : tcp_bt_sample_delivery_rate (tc, &rs);
993 : else
994 1061680 : rs.delivered = tc->bytes_acked + tc->sack_sb.last_sacked_bytes -
995 1061680 : tc->sack_sb.last_bytes_delivered;
996 :
997 1061680 : if (tc->bytes_acked + tc->sack_sb.last_sacked_bytes)
998 : {
999 41698 : tcp_update_rtt (tc, &rs, vnet_buffer (b)->tcp.ack_number);
1000 41698 : if (tc->bytes_acked)
1001 41698 : tcp_program_dequeue (wrk, tc);
1002 : }
1003 :
1004 : TCP_EVT (TCP_EVT_ACK_RCVD, tc);
1005 :
1006 : /*
1007 : * Check if we have congestion event
1008 : */
1009 :
1010 1061680 : if (tcp_ack_is_cc_event (tc, b, prev_snd_wnd, prev_snd_una, &is_dack))
1011 : {
1012 1 : tcp_cc_handle_event (tc, &rs, is_dack);
1013 1 : tc->dupacks_in += is_dack;
1014 1 : if (!tcp_in_cong_recovery (tc))
1015 : {
1016 1 : *error = TCP_ERROR_ACK_OK;
1017 1 : return 0;
1018 : }
1019 0 : *error = TCP_ERROR_ACK_DUP;
1020 0 : if (vnet_buffer (b)->tcp.data_len || tcp_is_fin (th))
1021 0 : return 0;
1022 0 : return -1;
1023 : }
1024 :
1025 : /*
1026 : * Update congestion control (slow start/congestion avoidance)
1027 : */
1028 1061680 : tcp_cc_update (tc, &rs);
1029 1061680 : *error = TCP_ERROR_ACK_OK;
1030 1061680 : return 0;
1031 : }
1032 :
1033 : static void
1034 127 : tcp_program_disconnect (tcp_worker_ctx_t * wrk, tcp_connection_t * tc)
1035 : {
1036 127 : if (!tcp_disconnect_pending (tc))
1037 : {
1038 127 : vec_add1 (wrk->pending_disconnects, tc->c_c_index);
1039 127 : tcp_disconnect_pending_on (tc);
1040 : }
1041 127 : }
1042 :
1043 : static void
1044 38137 : tcp_handle_disconnects (tcp_worker_ctx_t * wrk)
1045 : {
1046 : u32 thread_index, *pending_disconnects, *pending_resets;
1047 : tcp_connection_t *tc;
1048 : int i;
1049 :
1050 38137 : if (vec_len (wrk->pending_disconnects))
1051 : {
1052 24 : thread_index = wrk->vm->thread_index;
1053 24 : pending_disconnects = wrk->pending_disconnects;
1054 151 : for (i = 0; i < vec_len (pending_disconnects); i++)
1055 : {
1056 127 : tc = tcp_connection_get (pending_disconnects[i], thread_index);
1057 127 : tcp_disconnect_pending_off (tc);
1058 127 : session_transport_closing_notify (&tc->connection);
1059 : }
1060 24 : vec_set_len (wrk->pending_disconnects, 0);
1061 : }
1062 :
1063 38137 : if (vec_len (wrk->pending_resets))
1064 : {
1065 4 : thread_index = wrk->vm->thread_index;
1066 4 : pending_resets = wrk->pending_resets;
1067 8 : for (i = 0; i < vec_len (pending_resets); i++)
1068 : {
1069 4 : tc = tcp_connection_get (pending_resets[i], thread_index);
1070 4 : tcp_disconnect_pending_off (tc);
1071 4 : tcp_handle_rst (tc);
1072 : }
1073 4 : vec_set_len (wrk->pending_resets, 0);
1074 : }
1075 38137 : }
1076 :
1077 : static void
1078 127 : tcp_rcv_fin (tcp_worker_ctx_t * wrk, tcp_connection_t * tc, vlib_buffer_t * b,
1079 : u32 * error)
1080 : {
1081 : /* Reject out-of-order fins */
1082 127 : if (vnet_buffer (b)->tcp.seq_end != tc->rcv_nxt)
1083 0 : return;
1084 :
1085 : /* Account for the FIN and send ack */
1086 127 : tc->rcv_nxt += 1;
1087 127 : tc->flags |= TCP_CONN_FINRCVD;
1088 127 : tcp_program_ack (tc);
1089 : /* Enter CLOSE-WAIT and notify session. To avoid lingering
1090 : * in CLOSE-WAIT, set timer (reuse WAITCLOSE). */
1091 127 : tcp_connection_set_state (tc, TCP_STATE_CLOSE_WAIT);
1092 127 : tcp_program_disconnect (wrk, tc);
1093 127 : tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_WAITCLOSE,
1094 : tcp_cfg.closewait_time);
1095 : TCP_EVT (TCP_EVT_FIN_RCVD, tc);
1096 127 : *error = TCP_ERROR_FIN_RCVD;
1097 : }
1098 :
1099 : /** Enqueue data for delivery to application */
1100 : static int
1101 1029010 : tcp_session_enqueue_data (tcp_connection_t * tc, vlib_buffer_t * b,
1102 : u16 data_len)
1103 : {
1104 1029010 : int written, error = TCP_ERROR_ENQUEUED;
1105 :
1106 1029010 : ASSERT (seq_geq (vnet_buffer (b)->tcp.seq_number, tc->rcv_nxt));
1107 1029010 : ASSERT (data_len);
1108 1029010 : written = session_enqueue_stream_connection (&tc->connection, b, 0,
1109 : 1 /* queue event */ , 1);
1110 :
1111 : TCP_EVT (TCP_EVT_INPUT, tc, 0, data_len, written);
1112 :
1113 : /* Update rcv_nxt */
1114 1029010 : if (PREDICT_TRUE (written == data_len))
1115 : {
1116 1029010 : tc->rcv_nxt += written;
1117 1029010 : tc->bytes_in += written;
1118 : }
1119 : /* If more data written than expected, account for out-of-order bytes. */
1120 0 : else if (written > data_len)
1121 : {
1122 0 : tc->rcv_nxt += written;
1123 0 : tc->bytes_in += data_len;
1124 : TCP_EVT (TCP_EVT_CC_INPUT, tc, data_len, written);
1125 : }
1126 0 : else if (written > 0)
1127 : {
1128 : /* We've written something but FIFO is probably full now */
1129 0 : tc->rcv_nxt += written;
1130 0 : tc->bytes_in += written;
1131 0 : error = TCP_ERROR_PARTIALLY_ENQUEUED;
1132 : }
1133 : else
1134 : {
1135 : /* Packet made it through for ack processing */
1136 0 : if (tc->rcv_wnd < tc->snd_mss)
1137 0 : return TCP_ERROR_ZERO_RWND;
1138 :
1139 0 : return TCP_ERROR_FIFO_FULL;
1140 : }
1141 :
1142 : /* Update SACK list if need be */
1143 1029010 : if (tcp_opts_sack_permitted (&tc->rcv_opts) && vec_len (tc->snd_sacks))
1144 : {
1145 : /* Remove SACK blocks that have been delivered */
1146 0 : tcp_update_sack_list (tc, tc->rcv_nxt, tc->rcv_nxt);
1147 : }
1148 :
1149 1029010 : return error;
1150 : }
1151 :
1152 : /** Enqueue out-of-order data */
1153 : static int
1154 0 : tcp_session_enqueue_ooo (tcp_connection_t * tc, vlib_buffer_t * b,
1155 : u16 data_len)
1156 : {
1157 : session_t *s0;
1158 : int rv, offset;
1159 :
1160 0 : ASSERT (seq_gt (vnet_buffer (b)->tcp.seq_number, tc->rcv_nxt));
1161 0 : ASSERT (data_len);
1162 :
1163 : /* Enqueue out-of-order data with relative offset */
1164 0 : rv = session_enqueue_stream_connection (&tc->connection, b,
1165 0 : vnet_buffer (b)->tcp.seq_number -
1166 0 : tc->rcv_nxt, 0 /* queue event */ ,
1167 : 0);
1168 :
1169 : /* Nothing written */
1170 0 : if (rv)
1171 : {
1172 : TCP_EVT (TCP_EVT_INPUT, tc, 1, data_len, 0);
1173 0 : return TCP_ERROR_FIFO_FULL;
1174 : }
1175 :
1176 : TCP_EVT (TCP_EVT_INPUT, tc, 1, data_len, data_len);
1177 0 : tc->bytes_in += data_len;
1178 :
1179 : /* Update SACK list if in use */
1180 0 : if (tcp_opts_sack_permitted (&tc->rcv_opts))
1181 : {
1182 : ooo_segment_t *newest;
1183 : u32 start, end;
1184 :
1185 0 : s0 = session_get (tc->c_s_index, tc->c_thread_index);
1186 :
1187 : /* Get the newest segment from the fifo */
1188 0 : newest = svm_fifo_newest_ooo_segment (s0->rx_fifo);
1189 0 : if (newest)
1190 : {
1191 0 : offset = ooo_segment_offset_prod (s0->rx_fifo, newest);
1192 0 : ASSERT (offset <= vnet_buffer (b)->tcp.seq_number - tc->rcv_nxt);
1193 0 : start = tc->rcv_nxt + offset;
1194 0 : end = start + ooo_segment_length (s0->rx_fifo, newest);
1195 0 : tcp_update_sack_list (tc, start, end);
1196 0 : svm_fifo_newest_ooo_segment_reset (s0->rx_fifo);
1197 : TCP_EVT (TCP_EVT_CC_SACKS, tc);
1198 : }
1199 : }
1200 :
1201 0 : return TCP_ERROR_ENQUEUED_OOO;
1202 : }
1203 :
1204 : static int
1205 0 : tcp_buffer_discard_bytes (vlib_buffer_t * b, u32 n_bytes_to_drop)
1206 : {
1207 0 : u32 discard, first = b->current_length;
1208 0 : vlib_main_t *vm = vlib_get_main ();
1209 :
1210 : /* Handle multi-buffer segments */
1211 0 : if (n_bytes_to_drop > b->current_length)
1212 : {
1213 0 : if (!(b->flags & VLIB_BUFFER_NEXT_PRESENT))
1214 0 : return -1;
1215 : do
1216 : {
1217 0 : discard = clib_min (n_bytes_to_drop, b->current_length);
1218 0 : vlib_buffer_advance (b, discard);
1219 0 : b = vlib_get_buffer (vm, b->next_buffer);
1220 0 : n_bytes_to_drop -= discard;
1221 : }
1222 0 : while (n_bytes_to_drop);
1223 0 : if (n_bytes_to_drop > first)
1224 0 : b->total_length_not_including_first_buffer -= n_bytes_to_drop - first;
1225 : }
1226 : else
1227 0 : vlib_buffer_advance (b, n_bytes_to_drop);
1228 0 : vnet_buffer (b)->tcp.data_len -= n_bytes_to_drop;
1229 0 : return 0;
1230 : }
1231 :
1232 : /**
1233 : * Receive buffer for connection and handle acks
1234 : *
1235 : * It handles both in order or out-of-order data.
1236 : */
1237 : static int
1238 1029010 : tcp_segment_rcv (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
1239 : vlib_buffer_t * b)
1240 : {
1241 : u32 error, n_bytes_to_drop, n_data_bytes;
1242 :
1243 1029010 : vlib_buffer_advance (b, vnet_buffer (b)->tcp.data_offset);
1244 1029010 : n_data_bytes = vnet_buffer (b)->tcp.data_len;
1245 1029010 : ASSERT (n_data_bytes);
1246 1029010 : tc->data_segs_in += 1;
1247 :
1248 : /* Make sure we don't consume trailing bytes */
1249 1029010 : if (PREDICT_FALSE (b->current_length > n_data_bytes))
1250 0 : b->current_length = n_data_bytes;
1251 :
1252 : /* Handle out-of-order data */
1253 1029010 : if (PREDICT_FALSE (vnet_buffer (b)->tcp.seq_number != tc->rcv_nxt))
1254 : {
1255 : /* Old sequence numbers allowed through because they overlapped
1256 : * the rx window */
1257 0 : if (seq_lt (vnet_buffer (b)->tcp.seq_number, tc->rcv_nxt))
1258 : {
1259 : /* Completely in the past (possible retransmit). Ack
1260 : * retransmissions since we may not have any data to send */
1261 0 : if (seq_leq (vnet_buffer (b)->tcp.seq_end, tc->rcv_nxt))
1262 : {
1263 0 : tcp_program_dupack (tc);
1264 0 : tc->errors.below_data_wnd++;
1265 0 : error = TCP_ERROR_SEGMENT_OLD;
1266 0 : goto done;
1267 : }
1268 :
1269 : /* Chop off the bytes in the past and see if what is left
1270 : * can be enqueued in order */
1271 0 : n_bytes_to_drop = tc->rcv_nxt - vnet_buffer (b)->tcp.seq_number;
1272 0 : n_data_bytes -= n_bytes_to_drop;
1273 0 : vnet_buffer (b)->tcp.seq_number = tc->rcv_nxt;
1274 0 : if (tcp_buffer_discard_bytes (b, n_bytes_to_drop))
1275 : {
1276 0 : error = TCP_ERROR_SEGMENT_OLD;
1277 0 : goto done;
1278 : }
1279 0 : goto in_order;
1280 : }
1281 :
1282 : /* RFC2581: Enqueue and send DUPACK for fast retransmit */
1283 0 : error = tcp_session_enqueue_ooo (tc, b, n_data_bytes);
1284 0 : tcp_program_dupack (tc);
1285 : TCP_EVT (TCP_EVT_DUPACK_SENT, tc, vnet_buffer (b)->tcp);
1286 0 : tc->errors.above_data_wnd += seq_gt (vnet_buffer (b)->tcp.seq_end,
1287 : tc->rcv_las + tc->rcv_wnd);
1288 0 : goto done;
1289 : }
1290 :
1291 1029010 : in_order:
1292 :
1293 : /* In order data, enqueue. Fifo figures out by itself if any out-of-order
1294 : * segments can be enqueued after fifo tail offset changes. */
1295 1029010 : error = tcp_session_enqueue_data (tc, b, n_data_bytes);
1296 1029010 : tcp_program_ack (tc);
1297 :
1298 1029010 : done:
1299 1029010 : return error;
1300 : }
1301 :
1302 : typedef struct
1303 : {
1304 : tcp_header_t tcp_header;
1305 : tcp_connection_t tcp_connection;
1306 : } tcp_rx_trace_t;
1307 :
1308 : static u8 *
1309 6 : format_tcp_rx_trace (u8 * s, va_list * args)
1310 : {
1311 6 : CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
1312 6 : CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
1313 6 : tcp_rx_trace_t *t = va_arg (*args, tcp_rx_trace_t *);
1314 6 : tcp_connection_t *tc = &t->tcp_connection;
1315 6 : u32 indent = format_get_indent (s);
1316 :
1317 6 : if (!tc->c_lcl_port)
1318 0 : s = format (s, "no tcp connection\n%U%U", format_white_space, indent,
1319 : format_tcp_header, &t->tcp_header, 128);
1320 : else
1321 6 : s = format (s, "%U state %U\n%U%U", format_tcp_connection_id, tc,
1322 6 : format_tcp_state, tc->state, format_white_space, indent,
1323 : format_tcp_header, &t->tcp_header, 128);
1324 :
1325 6 : return s;
1326 : }
1327 :
1328 : static u8 *
1329 6 : format_tcp_rx_trace_short (u8 * s, va_list * args)
1330 : {
1331 6 : CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
1332 6 : CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
1333 6 : tcp_rx_trace_t *t = va_arg (*args, tcp_rx_trace_t *);
1334 :
1335 6 : s = format (s, "%d -> %d (%U)",
1336 6 : clib_net_to_host_u16 (t->tcp_header.dst_port),
1337 6 : clib_net_to_host_u16 (t->tcp_header.src_port), format_tcp_state,
1338 6 : t->tcp_connection.state);
1339 :
1340 6 : return s;
1341 : }
1342 :
1343 : static void
1344 6 : tcp_set_rx_trace_data (tcp_rx_trace_t * t0, tcp_connection_t * tc0,
1345 : tcp_header_t * th0, vlib_buffer_t * b0, u8 is_ip4)
1346 : {
1347 6 : if (tc0)
1348 : {
1349 6 : clib_memcpy_fast (&t0->tcp_connection, tc0,
1350 : sizeof (t0->tcp_connection));
1351 : }
1352 : else
1353 : {
1354 0 : th0 = tcp_buffer_hdr (b0);
1355 : }
1356 6 : clib_memcpy_fast (&t0->tcp_header, th0, sizeof (t0->tcp_header));
1357 6 : }
1358 :
1359 : static void
1360 0 : tcp_established_trace_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
1361 : vlib_frame_t * frame, u8 is_ip4)
1362 : {
1363 : u32 *from, n_left;
1364 :
1365 0 : n_left = frame->n_vectors;
1366 0 : from = vlib_frame_vector_args (frame);
1367 :
1368 0 : while (n_left >= 1)
1369 : {
1370 : tcp_connection_t *tc0;
1371 : tcp_rx_trace_t *t0;
1372 : tcp_header_t *th0;
1373 : vlib_buffer_t *b0;
1374 : u32 bi0;
1375 :
1376 0 : bi0 = from[0];
1377 0 : b0 = vlib_get_buffer (vm, bi0);
1378 :
1379 0 : if (b0->flags & VLIB_BUFFER_IS_TRACED)
1380 : {
1381 0 : t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
1382 0 : tc0 = tcp_connection_get (vnet_buffer (b0)->tcp.connection_index,
1383 : vm->thread_index);
1384 0 : th0 = tcp_buffer_hdr (b0);
1385 0 : tcp_set_rx_trace_data (t0, tc0, th0, b0, is_ip4);
1386 : }
1387 :
1388 0 : from += 1;
1389 0 : n_left -= 1;
1390 : }
1391 0 : }
1392 :
1393 : always_inline uword
1394 37998 : tcp46_established_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
1395 : vlib_frame_t * frame, int is_ip4)
1396 : {
1397 37998 : u32 thread_index = vm->thread_index, n_left_from, *from;
1398 37998 : tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
1399 : vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
1400 37998 : u16 err_counters[TCP_N_ERROR] = { 0 };
1401 :
1402 37998 : if (node->flags & VLIB_NODE_FLAG_TRACE)
1403 0 : tcp_established_trace_frame (vm, node, frame, is_ip4);
1404 :
1405 37998 : from = vlib_frame_vector_args (frame);
1406 37998 : n_left_from = frame->n_vectors;
1407 :
1408 37998 : vlib_get_buffers (vm, from, bufs, n_left_from);
1409 37998 : b = bufs;
1410 :
1411 1099550 : while (n_left_from > 0)
1412 : {
1413 1061550 : u32 error = TCP_ERROR_ACK_OK;
1414 : tcp_connection_t *tc;
1415 : tcp_header_t *th;
1416 :
1417 1061550 : if (n_left_from > 1)
1418 : {
1419 1023560 : vlib_prefetch_buffer_header (b[1], LOAD);
1420 1023560 : CLIB_PREFETCH (b[1]->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
1421 : }
1422 :
1423 1061550 : tc = tcp_connection_get (vnet_buffer (b[0])->tcp.connection_index,
1424 : thread_index);
1425 :
1426 1061550 : if (PREDICT_FALSE (tc == 0))
1427 : {
1428 0 : error = TCP_ERROR_INVALID_CONNECTION;
1429 0 : goto done;
1430 : }
1431 :
1432 1061550 : th = tcp_buffer_hdr (b[0]);
1433 :
1434 : /* TODO header prediction fast path */
1435 :
1436 : /* 1-4: check SEQ, RST, SYN */
1437 1061550 : if (PREDICT_FALSE (tcp_segment_validate (wrk, tc, b[0], th, &error)))
1438 : {
1439 : TCP_EVT (TCP_EVT_SEG_INVALID, tc, vnet_buffer (b[0])->tcp);
1440 6 : goto done;
1441 : }
1442 :
1443 : /* 5: check the ACK field */
1444 1061550 : if (PREDICT_FALSE (tcp_rcv_ack (wrk, tc, b[0], th, &error)))
1445 0 : goto done;
1446 :
1447 : /* 6: check the URG bit TODO */
1448 :
1449 : /* 7: process the segment text */
1450 1061550 : if (vnet_buffer (b[0])->tcp.data_len)
1451 1029000 : error = tcp_segment_rcv (wrk, tc, b[0]);
1452 :
1453 : /* 8: check the FIN bit */
1454 1061550 : if (PREDICT_FALSE (tcp_is_fin (th)))
1455 127 : tcp_rcv_fin (wrk, tc, b[0], &error);
1456 :
1457 1061420 : done:
1458 1061550 : tcp_inc_err_counter (err_counters, error, 1);
1459 :
1460 1061550 : n_left_from -= 1;
1461 1061550 : b += 1;
1462 : }
1463 :
1464 37998 : session_main_flush_enqueue_events (TRANSPORT_PROTO_TCP, thread_index);
1465 1443920 : tcp_store_err_counters (established, err_counters);
1466 37998 : tcp_handle_postponed_dequeues (wrk);
1467 37998 : tcp_handle_disconnects (wrk);
1468 37998 : vlib_buffer_free (vm, from, frame->n_vectors);
1469 :
1470 37998 : return frame->n_vectors;
1471 : }
1472 :
1473 40287 : VLIB_NODE_FN (tcp4_established_node) (vlib_main_t * vm,
1474 : vlib_node_runtime_t * node,
1475 : vlib_frame_t * from_frame)
1476 : {
1477 37987 : return tcp46_established_inline (vm, node, from_frame, 1 /* is_ip4 */ );
1478 : }
1479 :
1480 2311 : VLIB_NODE_FN (tcp6_established_node) (vlib_main_t * vm,
1481 : vlib_node_runtime_t * node,
1482 : vlib_frame_t * from_frame)
1483 : {
1484 11 : return tcp46_established_inline (vm, node, from_frame, 0 /* is_ip4 */ );
1485 : }
1486 :
1487 : /* *INDENT-OFF* */
1488 183788 : VLIB_REGISTER_NODE (tcp4_established_node) = {
1489 : .name = "tcp4-established",
1490 : /* Takes a vector of packets. */
1491 : .vector_size = sizeof (u32),
1492 : .n_errors = TCP_N_ERROR,
1493 : .error_counters = tcp_input_error_counters,
1494 : .format_trace = format_tcp_rx_trace_short,
1495 : };
1496 : /* *INDENT-ON* */
1497 :
1498 : /* *INDENT-OFF* */
1499 183788 : VLIB_REGISTER_NODE (tcp6_established_node) = {
1500 : .name = "tcp6-established",
1501 : /* Takes a vector of packets. */
1502 : .vector_size = sizeof (u32),
1503 : .n_errors = TCP_N_ERROR,
1504 : .error_counters = tcp_input_error_counters,
1505 : .format_trace = format_tcp_rx_trace_short,
1506 : };
1507 : /* *INDENT-ON* */
1508 :
1509 :
1510 : static u8
1511 1063170 : tcp_lookup_is_valid (tcp_connection_t * tc, vlib_buffer_t * b,
1512 : tcp_header_t * hdr)
1513 : {
1514 1063170 : transport_connection_t *tmp = 0;
1515 : u64 handle;
1516 :
1517 1063170 : if (!tc)
1518 0 : return 1;
1519 :
1520 : /* Proxy case */
1521 1063170 : if (tc->c_lcl_port == 0 && tc->state == TCP_STATE_LISTEN)
1522 0 : return 1;
1523 :
1524 1063170 : u8 is_ip_valid = 0, val_l, val_r;
1525 :
1526 1063170 : if (tc->connection.is_ip4)
1527 : {
1528 1063150 : ip4_header_t *ip4_hdr = (ip4_header_t *) vlib_buffer_get_current (b);
1529 :
1530 1063150 : val_l = !ip4_address_compare (&ip4_hdr->dst_address,
1531 : &tc->connection.lcl_ip.ip4);
1532 1063150 : val_l = val_l || ip_is_zero (&tc->connection.lcl_ip, 1);
1533 1063150 : val_r = !ip4_address_compare (&ip4_hdr->src_address,
1534 : &tc->connection.rmt_ip.ip4);
1535 1063150 : val_r = val_r || tc->state == TCP_STATE_LISTEN;
1536 1063150 : is_ip_valid = val_l && val_r;
1537 : }
1538 : else
1539 : {
1540 22 : ip6_header_t *ip6_hdr = (ip6_header_t *) vlib_buffer_get_current (b);
1541 :
1542 22 : val_l = !ip6_address_compare (&ip6_hdr->dst_address,
1543 : &tc->connection.lcl_ip.ip6);
1544 22 : val_l = val_l || ip_is_zero (&tc->connection.lcl_ip, 0);
1545 22 : val_r = !ip6_address_compare (&ip6_hdr->src_address,
1546 : &tc->connection.rmt_ip.ip6);
1547 22 : val_r = val_r || tc->state == TCP_STATE_LISTEN;
1548 22 : is_ip_valid = val_l && val_r;
1549 : }
1550 :
1551 2126340 : u8 is_valid = (tc->c_lcl_port == hdr->dst_port
1552 1063170 : && (tc->state == TCP_STATE_LISTEN
1553 2126340 : || tc->c_rmt_port == hdr->src_port) && is_ip_valid);
1554 :
1555 1063170 : if (!is_valid)
1556 : {
1557 0 : handle = session_lookup_half_open_handle (&tc->connection);
1558 0 : tmp = session_lookup_half_open_connection (handle & 0xFFFFFFFF,
1559 0 : tc->c_proto, tc->c_is_ip4);
1560 :
1561 0 : if (tmp)
1562 : {
1563 0 : if (tmp->lcl_port == hdr->dst_port
1564 0 : && tmp->rmt_port == hdr->src_port)
1565 : {
1566 : TCP_DBG ("half-open is valid!");
1567 0 : is_valid = 1;
1568 : }
1569 : }
1570 : }
1571 1063170 : return is_valid;
1572 : }
1573 :
1574 : /**
1575 : * Lookup transport connection
1576 : */
1577 : static tcp_connection_t *
1578 656 : tcp_lookup_connection (u32 fib_index, vlib_buffer_t * b, u8 thread_index,
1579 : u8 is_ip4)
1580 : {
1581 : tcp_header_t *tcp;
1582 : transport_connection_t *tconn;
1583 : tcp_connection_t *tc;
1584 656 : u8 is_filtered = 0;
1585 656 : if (is_ip4)
1586 : {
1587 : ip4_header_t *ip4;
1588 651 : ip4 = vlib_buffer_get_current (b);
1589 651 : tcp = ip4_next_header (ip4);
1590 651 : tconn = session_lookup_connection_wt4 (fib_index,
1591 : &ip4->dst_address,
1592 : &ip4->src_address,
1593 651 : tcp->dst_port,
1594 651 : tcp->src_port,
1595 : TRANSPORT_PROTO_TCP,
1596 : thread_index, &is_filtered);
1597 651 : tc = tcp_get_connection_from_transport (tconn);
1598 651 : ASSERT (tcp_lookup_is_valid (tc, b, tcp));
1599 : }
1600 : else
1601 : {
1602 : ip6_header_t *ip6;
1603 5 : ip6 = vlib_buffer_get_current (b);
1604 5 : tcp = ip6_next_header (ip6);
1605 5 : tconn = session_lookup_connection_wt6 (fib_index,
1606 : &ip6->dst_address,
1607 : &ip6->src_address,
1608 5 : tcp->dst_port,
1609 5 : tcp->src_port,
1610 : TRANSPORT_PROTO_TCP,
1611 : thread_index, &is_filtered);
1612 5 : tc = tcp_get_connection_from_transport (tconn);
1613 5 : ASSERT (tcp_lookup_is_valid (tc, b, tcp));
1614 : }
1615 656 : return tc;
1616 : }
1617 :
1618 : static tcp_connection_t *
1619 0 : tcp_lookup_listener (vlib_buffer_t * b, u32 fib_index, int is_ip4)
1620 : {
1621 : session_t *s;
1622 :
1623 0 : if (is_ip4)
1624 : {
1625 0 : ip4_header_t *ip4 = vlib_buffer_get_current (b);
1626 0 : tcp_header_t *tcp = tcp_buffer_hdr (b);
1627 0 : s = session_lookup_listener4 (fib_index,
1628 : &ip4->dst_address,
1629 0 : tcp->dst_port, TRANSPORT_PROTO_TCP, 1);
1630 : }
1631 : else
1632 : {
1633 0 : ip6_header_t *ip6 = vlib_buffer_get_current (b);
1634 0 : tcp_header_t *tcp = tcp_buffer_hdr (b);
1635 0 : s = session_lookup_listener6 (fib_index,
1636 : &ip6->dst_address,
1637 0 : tcp->dst_port, TRANSPORT_PROTO_TCP, 1);
1638 :
1639 : }
1640 0 : if (PREDICT_TRUE (s != 0))
1641 0 : return tcp_get_connection_from_transport (transport_get_listener
1642 : (TRANSPORT_PROTO_TCP,
1643 : s->connection_index));
1644 : else
1645 0 : return 0;
1646 : }
1647 :
1648 : static void
1649 0 : tcp46_syn_sent_trace_frame (vlib_main_t *vm, vlib_node_runtime_t *node,
1650 : u32 *from, u32 n_bufs)
1651 : {
1652 0 : tcp_connection_t *tc = 0;
1653 : tcp_rx_trace_t *t;
1654 : vlib_buffer_t *b;
1655 : int i;
1656 :
1657 0 : for (i = 0; i < n_bufs; i++)
1658 : {
1659 0 : b = vlib_get_buffer (vm, from[i]);
1660 0 : if (!(b->flags & VLIB_BUFFER_IS_TRACED))
1661 0 : continue;
1662 : tc =
1663 0 : tcp_half_open_connection_get (vnet_buffer (b)->tcp.connection_index);
1664 0 : t = vlib_add_trace (vm, node, b, sizeof (*t));
1665 0 : tcp_set_rx_trace_data (t, tc, tcp_buffer_hdr (b), b, 1);
1666 : }
1667 0 : }
1668 :
1669 : always_inline void
1670 0 : tcp_check_tx_offload (tcp_connection_t * tc, int is_ipv4)
1671 : {
1672 0 : vnet_main_t *vnm = vnet_get_main ();
1673 : const dpo_id_t *dpo;
1674 : const load_balance_t *lb;
1675 : vnet_hw_interface_t *hw_if;
1676 : u32 sw_if_idx, lb_idx;
1677 :
1678 0 : if (is_ipv4)
1679 : {
1680 0 : ip4_address_t *dst_addr = &(tc->c_rmt_ip.ip4);
1681 0 : lb_idx = ip4_fib_forwarding_lookup (tc->c_fib_index, dst_addr);
1682 : }
1683 : else
1684 : {
1685 0 : ip6_address_t *dst_addr = &(tc->c_rmt_ip.ip6);
1686 0 : lb_idx = ip6_fib_table_fwding_lookup (tc->c_fib_index, dst_addr);
1687 : }
1688 :
1689 0 : lb = load_balance_get (lb_idx);
1690 0 : if (PREDICT_FALSE (lb->lb_n_buckets > 1))
1691 0 : return;
1692 0 : dpo = load_balance_get_bucket_i (lb, 0);
1693 :
1694 0 : sw_if_idx = dpo_get_urpf (dpo);
1695 0 : if (PREDICT_FALSE (sw_if_idx == ~0))
1696 0 : return;
1697 :
1698 0 : hw_if = vnet_get_sup_hw_interface (vnm, sw_if_idx);
1699 0 : if (hw_if->caps & VNET_HW_IF_CAP_TCP_GSO)
1700 0 : tc->cfg_flags |= TCP_CFG_F_TSO;
1701 : }
1702 :
1703 : static void
1704 3 : tcp_input_trace_frame (vlib_main_t *vm, vlib_node_runtime_t *node,
1705 : vlib_buffer_t **bs, u16 *nexts, u32 n_bufs, u8 is_ip4)
1706 : {
1707 : tcp_connection_t *tc;
1708 : tcp_header_t *tcp;
1709 : tcp_rx_trace_t *t;
1710 : u8 flags;
1711 : int i;
1712 :
1713 6 : for (i = 0; i < n_bufs; i++)
1714 : {
1715 3 : if (!(bs[i]->flags & VLIB_BUFFER_IS_TRACED))
1716 0 : continue;
1717 :
1718 3 : t = vlib_add_trace (vm, node, bs[i], sizeof (*t));
1719 3 : if (nexts[i] == TCP_INPUT_NEXT_DROP || nexts[i] == TCP_INPUT_NEXT_PUNT ||
1720 3 : nexts[i] == TCP_INPUT_NEXT_RESET)
1721 : {
1722 0 : tc = 0;
1723 : }
1724 : else
1725 : {
1726 3 : flags = vnet_buffer (bs[i])->tcp.flags;
1727 :
1728 3 : if (flags == TCP_STATE_LISTEN)
1729 3 : tc = tcp_listener_get (vnet_buffer (bs[i])->tcp.connection_index);
1730 0 : else if (flags == TCP_STATE_SYN_SENT)
1731 0 : tc = tcp_half_open_connection_get (
1732 0 : vnet_buffer (bs[i])->tcp.connection_index);
1733 : else
1734 0 : tc = tcp_connection_get (vnet_buffer (bs[i])->tcp.connection_index,
1735 : vm->thread_index);
1736 : }
1737 3 : tcp = tcp_buffer_hdr (bs[i]);
1738 3 : tcp_set_rx_trace_data (t, tc, tcp, bs[i], is_ip4);
1739 : }
1740 3 : }
1741 :
1742 : always_inline uword
1743 33 : tcp46_syn_sent_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
1744 : vlib_frame_t *frame, int is_ip4)
1745 : {
1746 33 : u32 n_left_from, *from, thread_index = vm->thread_index;
1747 33 : tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
1748 : vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
1749 :
1750 33 : from = vlib_frame_vector_args (frame);
1751 33 : n_left_from = frame->n_vectors;
1752 :
1753 33 : if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
1754 0 : tcp46_syn_sent_trace_frame (vm, node, from, n_left_from);
1755 :
1756 33 : vlib_get_buffers (vm, from, bufs, n_left_from);
1757 33 : b = bufs;
1758 :
1759 165 : while (n_left_from > 0)
1760 : {
1761 132 : u32 ack, seq, error = TCP_ERROR_NONE;
1762 : tcp_connection_t *tc, *new_tc;
1763 : tcp_header_t *tcp;
1764 :
1765 264 : tc = tcp_half_open_connection_get (
1766 132 : vnet_buffer (b[0])->tcp.connection_index);
1767 132 : if (PREDICT_FALSE (tc == 0))
1768 : {
1769 0 : error = TCP_ERROR_INVALID_CONNECTION;
1770 0 : goto drop;
1771 : }
1772 :
1773 : /* Half-open completed or cancelled recently but the connection
1774 : * wasn't removed yet by the owning thread */
1775 132 : if (PREDICT_FALSE (tc->flags & TCP_CONN_HALF_OPEN_DONE))
1776 : {
1777 0 : error = TCP_ERROR_SPURIOUS_SYN_ACK;
1778 0 : goto drop;
1779 : }
1780 :
1781 132 : ack = vnet_buffer (b[0])->tcp.ack_number;
1782 132 : seq = vnet_buffer (b[0])->tcp.seq_number;
1783 132 : tcp = tcp_buffer_hdr (b[0]);
1784 :
1785 : /* Crude check to see if the connection handle does not match
1786 : * the packet. Probably connection just switched to established */
1787 132 : if (PREDICT_FALSE (tcp->dst_port != tc->c_lcl_port ||
1788 : tcp->src_port != tc->c_rmt_port))
1789 : {
1790 0 : error = TCP_ERROR_INVALID_CONNECTION;
1791 0 : goto drop;
1792 : }
1793 :
1794 132 : if (PREDICT_FALSE (!tcp_ack (tcp) && !tcp_rst (tcp) && !tcp_syn (tcp)))
1795 : {
1796 0 : error = TCP_ERROR_SEGMENT_INVALID;
1797 0 : goto drop;
1798 : }
1799 :
1800 : /* SYNs consume sequence numbers */
1801 132 : vnet_buffer (b[0])->tcp.seq_end += tcp_is_syn (tcp);
1802 :
1803 : /*
1804 : * 1. check the ACK bit
1805 : */
1806 :
1807 : /*
1808 : * If the ACK bit is set
1809 : * If SEG.ACK =< ISS, or SEG.ACK > SND.NXT, send a reset (unless
1810 : * the RST bit is set, if so drop the segment and return)
1811 : * <SEQ=SEG.ACK><CTL=RST>
1812 : * and discard the segment. Return.
1813 : * If SND.UNA =< SEG.ACK =< SND.NXT then the ACK is acceptable.
1814 : */
1815 132 : if (tcp_ack (tcp))
1816 : {
1817 132 : if (seq_leq (ack, tc->iss) || seq_gt (ack, tc->snd_nxt))
1818 : {
1819 0 : if (!tcp_rst (tcp))
1820 0 : tcp_send_reset_w_pkt (tc, b[0], thread_index, is_ip4);
1821 0 : error = TCP_ERROR_RCV_WND;
1822 0 : goto drop;
1823 : }
1824 :
1825 : /* Make sure ACK is valid */
1826 132 : if (seq_gt (tc->snd_una, ack))
1827 : {
1828 0 : error = TCP_ERROR_ACK_INVALID;
1829 0 : goto drop;
1830 : }
1831 : }
1832 :
1833 : /*
1834 : * 2. check the RST bit
1835 : */
1836 :
1837 132 : if (tcp_rst (tcp))
1838 : {
1839 : /* If ACK is acceptable, signal client that peer is not
1840 : * willing to accept the connection, then drop the connection */
1841 0 : if (tcp_ack (tcp))
1842 0 : tcp_rcv_rst (wrk, tc);
1843 0 : error = TCP_ERROR_RST_RCVD;
1844 0 : goto drop;
1845 : }
1846 :
1847 : /*
1848 : * 3. check the security and precedence (skipped)
1849 : */
1850 :
1851 : /*
1852 : * 4. check the SYN bit
1853 : */
1854 :
1855 : /* No SYN flag. Drop. */
1856 132 : if (!tcp_syn (tcp))
1857 : {
1858 0 : error = TCP_ERROR_SEGMENT_INVALID;
1859 0 : goto drop;
1860 : }
1861 :
1862 : /* Parse options */
1863 132 : if (tcp_options_parse (tcp, &tc->rcv_opts, 1))
1864 : {
1865 0 : error = TCP_ERROR_OPTIONS;
1866 0 : goto drop;
1867 : }
1868 :
1869 : /* Valid SYN or SYN-ACK. Move connection from half-open pool to
1870 : * current thread pool. */
1871 132 : new_tc = tcp_connection_alloc_w_base (thread_index, &tc);
1872 132 : new_tc->rcv_nxt = vnet_buffer (b[0])->tcp.seq_end;
1873 132 : new_tc->irs = seq;
1874 132 : new_tc->timers[TCP_TIMER_RETRANSMIT_SYN] = TCP_TIMER_HANDLE_INVALID;
1875 :
1876 132 : if (tcp_opts_tstamp (&new_tc->rcv_opts))
1877 : {
1878 132 : new_tc->tsval_recent = new_tc->rcv_opts.tsval;
1879 132 : new_tc->tsval_recent_age = tcp_time_tstamp (thread_index);
1880 : }
1881 :
1882 132 : if (tcp_opts_wscale (&new_tc->rcv_opts))
1883 132 : new_tc->snd_wscale = new_tc->rcv_opts.wscale;
1884 : else
1885 0 : new_tc->rcv_wscale = 0;
1886 :
1887 132 : new_tc->snd_wnd = clib_net_to_host_u16 (tcp->window)
1888 132 : << new_tc->snd_wscale;
1889 132 : new_tc->snd_wl1 = seq;
1890 132 : new_tc->snd_wl2 = ack;
1891 :
1892 132 : tcp_connection_init_vars (new_tc);
1893 :
1894 : /* SYN-ACK: See if we can switch to ESTABLISHED state */
1895 132 : if (PREDICT_TRUE (tcp_ack (tcp)))
1896 : {
1897 : /* Our SYN is ACKed: we have iss < ack = snd_una */
1898 :
1899 : /* TODO Dequeue acknowledged segments if we support Fast Open */
1900 132 : new_tc->snd_una = ack;
1901 132 : new_tc->state = TCP_STATE_ESTABLISHED;
1902 :
1903 : /* Make sure las is initialized for the wnd computation */
1904 132 : new_tc->rcv_las = new_tc->rcv_nxt;
1905 :
1906 : /* Notify app that we have connection. If session layer can't
1907 : * allocate session send reset */
1908 132 : if (session_stream_connect_notify (&new_tc->connection,
1909 : SESSION_E_NONE))
1910 : {
1911 0 : tcp_send_reset_w_pkt (new_tc, b[0], thread_index, is_ip4);
1912 0 : tcp_connection_cleanup (new_tc);
1913 0 : error = TCP_ERROR_CREATE_SESSION_FAIL;
1914 0 : goto cleanup_ho;
1915 : }
1916 :
1917 132 : transport_fifos_init_ooo (&new_tc->connection);
1918 132 : new_tc->tx_fifo_size = transport_tx_fifo_size (&new_tc->connection);
1919 : /* Update rtt with the syn-ack sample */
1920 132 : tcp_estimate_initial_rtt (new_tc);
1921 : TCP_EVT (TCP_EVT_SYNACK_RCVD, new_tc);
1922 132 : error = TCP_ERROR_SYN_ACKS_RCVD;
1923 : }
1924 : /* SYN: Simultaneous open. Change state to SYN-RCVD and send SYN-ACK */
1925 : else
1926 : {
1927 0 : new_tc->state = TCP_STATE_SYN_RCVD;
1928 :
1929 : /* Notify app that we have connection */
1930 0 : if (session_stream_connect_notify (&new_tc->connection,
1931 : SESSION_E_NONE))
1932 : {
1933 0 : tcp_connection_cleanup (new_tc);
1934 0 : tcp_send_reset_w_pkt (tc, b[0], thread_index, is_ip4);
1935 : TCP_EVT (TCP_EVT_RST_SENT, tc);
1936 0 : error = TCP_ERROR_CREATE_SESSION_FAIL;
1937 0 : goto cleanup_ho;
1938 : }
1939 :
1940 0 : transport_fifos_init_ooo (&new_tc->connection);
1941 0 : new_tc->tx_fifo_size = transport_tx_fifo_size (&new_tc->connection);
1942 0 : new_tc->rtt_ts = 0;
1943 0 : tcp_init_snd_vars (new_tc);
1944 0 : tcp_send_synack (new_tc);
1945 0 : error = TCP_ERROR_SYNS_RCVD;
1946 0 : goto cleanup_ho;
1947 : }
1948 :
1949 132 : if (!(new_tc->cfg_flags & TCP_CFG_F_NO_TSO))
1950 0 : tcp_check_tx_offload (new_tc, is_ip4);
1951 :
1952 : /* Read data, if any */
1953 132 : if (PREDICT_FALSE (vnet_buffer (b[0])->tcp.data_len))
1954 : {
1955 0 : clib_warning ("rcvd data in syn-sent");
1956 0 : error = tcp_segment_rcv (wrk, new_tc, b[0]);
1957 0 : if (error == TCP_ERROR_ACK_OK)
1958 0 : error = TCP_ERROR_SYN_ACKS_RCVD;
1959 : }
1960 : else
1961 : {
1962 : /* Send ack now instead of programming it because connection was
1963 : * just established and it's not optional. */
1964 132 : tcp_send_ack (new_tc);
1965 : }
1966 :
1967 132 : cleanup_ho:
1968 :
1969 : /* If this is not the owning thread, wait for syn retransmit to
1970 : * expire and then clean up */
1971 132 : if (tcp_half_open_connection_cleanup (tc))
1972 0 : tc->flags |= TCP_CONN_HALF_OPEN_DONE;
1973 :
1974 132 : drop:
1975 :
1976 132 : b += 1;
1977 132 : n_left_from -= 1;
1978 132 : tcp_inc_counter (syn_sent, error, 1);
1979 : }
1980 :
1981 33 : session_main_flush_enqueue_events (TRANSPORT_PROTO_TCP, thread_index);
1982 33 : vlib_buffer_free (vm, from, frame->n_vectors);
1983 33 : tcp_handle_disconnects (wrk);
1984 :
1985 33 : return frame->n_vectors;
1986 : }
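/*
 * A minimal, framework-free sketch of the wrap-safe sequence-number
 * arithmetic the ACK acceptability checks above rely on (SEG.ACK must
 * fall in (ISS, SND.NXT]). The demo_seq_* helpers below are illustrative
 * stand-ins assumed to behave like the seq_leq/seq_gt helpers used in
 * this file; they are not the vppinfra definitions.
 */
#include <stdint.h>
#include <stdio.h>

static inline int
demo_seq_leq (uint32_t a, uint32_t b)
{
  return (int32_t) (a - b) <= 0;
}

static inline int
demo_seq_gt (uint32_t a, uint32_t b)
{
  return (int32_t) (a - b) > 0;
}

int
main (void)
{
  uint32_t iss = 0xfffffffd;	/* initial send sequence number */
  uint32_t snd_nxt = iss + 6;	/* SYN + 5 bytes sent, wrapped past 2^32 - 1 */
  uint32_t ack = 0x00000001;	/* peer's cumulative ACK, also wrapped */

  /* ACK is acceptable iff it is neither stale (<= ISS) nor beyond SND.NXT */
  printf ("stale=%d beyond=%d\n", demo_seq_leq (ack, iss),
	  demo_seq_gt (ack, snd_nxt));
  return 0;
}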
1987 :
1988 2332 : VLIB_NODE_FN (tcp4_syn_sent_node) (vlib_main_t * vm,
1989 : vlib_node_runtime_t * node,
1990 : vlib_frame_t * from_frame)
1991 : {
1992 32 : return tcp46_syn_sent_inline (vm, node, from_frame, 1 /* is_ip4 */ );
1993 : }
1994 :
1995 2301 : VLIB_NODE_FN (tcp6_syn_sent_node) (vlib_main_t * vm,
1996 : vlib_node_runtime_t * node,
1997 : vlib_frame_t * from_frame)
1998 : {
1999 1 : return tcp46_syn_sent_inline (vm, node, from_frame, 0 /* is_ip4 */ );
2000 : }
2001 :
2002 : /* *INDENT-OFF* */
2003 183788 : VLIB_REGISTER_NODE (tcp4_syn_sent_node) =
2004 : {
2005 : .name = "tcp4-syn-sent",
2006 : /* Takes a vector of packets. */
2007 : .vector_size = sizeof (u32),
2008 : .n_errors = TCP_N_ERROR,
2009 : .error_counters = tcp_input_error_counters,
2010 : .format_trace = format_tcp_rx_trace_short,
2011 : };
2012 : /* *INDENT-ON* */
2013 :
2014 : /* *INDENT-OFF* */
2015 183788 : VLIB_REGISTER_NODE (tcp6_syn_sent_node) =
2016 : {
2017 : .name = "tcp6-syn-sent",
2018 : /* Takes a vector of packets. */
2019 : .vector_size = sizeof (u32),
2020 : .n_errors = TCP_N_ERROR,
2021 : .error_counters = tcp_input_error_counters,
2022 : .format_trace = format_tcp_rx_trace_short,
2023 : };
2024 : /* *INDENT-ON* */
2025 :
2026 : static void
2027 0 : tcp46_rcv_process_trace_frame (vlib_main_t *vm, vlib_node_runtime_t *node,
2028 : u32 *from, u32 n_bufs)
2029 : {
2030 0 : u32 thread_index = vm->thread_index;
2031 0 : tcp_connection_t *tc = 0;
2032 : tcp_rx_trace_t *t;
2033 : vlib_buffer_t *b;
2034 : int i;
2035 :
2036 0 : for (i = 0; i < n_bufs; i++)
2037 : {
2038 0 : b = vlib_get_buffer (vm, from[i]);
2039 0 : if (!(b->flags & VLIB_BUFFER_IS_TRACED))
2040 0 : continue;
2041 0 : tc = tcp_connection_get (vnet_buffer (b)->tcp.connection_index,
2042 : thread_index);
2043 0 : t = vlib_add_trace (vm, node, b, sizeof (*t));
2044 0 : tcp_set_rx_trace_data (t, tc, tcp_buffer_hdr (b), b, 1);
2045 : }
2046 0 : }
2047 :
2048 : /**
2049 : * Handles reception for all states except LISTEN, SYN-SENT and ESTABLISHED
2050 : * as per RFC793 p. 64
2051 : */
2052 : always_inline uword
2053 106 : tcp46_rcv_process_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
2054 : vlib_frame_t *frame, int is_ip4)
2055 : {
2056 106 : u32 thread_index = vm->thread_index, n_left_from, *from, max_deq;
2057 106 : tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
2058 : vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
2059 :
2060 106 : from = vlib_frame_vector_args (frame);
2061 106 : n_left_from = frame->n_vectors;
2062 :
2063 106 : if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
2064 0 : tcp46_rcv_process_trace_frame (vm, node, from, n_left_from);
2065 :
2066 106 : vlib_get_buffers (vm, from, bufs, n_left_from);
2067 106 : b = bufs;
2068 :
2069 627 : while (n_left_from > 0)
2070 : {
2071 521 : u32 error = TCP_ERROR_NONE;
2072 521 : tcp_header_t *tcp = 0;
2073 : tcp_connection_t *tc;
2074 : u8 is_fin;
2075 :
2076 521 : tc = tcp_connection_get (vnet_buffer (b[0])->tcp.connection_index,
2077 : thread_index);
2078 521 : if (PREDICT_FALSE (tc == 0))
2079 : {
2080 0 : error = TCP_ERROR_INVALID_CONNECTION;
2081 0 : goto drop;
2082 : }
2083 :
2084 521 : tcp = tcp_buffer_hdr (b[0]);
2085 521 : is_fin = tcp_is_fin (tcp);
2086 :
2087 : if (CLIB_DEBUG)
2088 : {
2089 521 : if (!(tc->connection.flags & TRANSPORT_CONNECTION_F_NO_LOOKUP))
2090 : {
2091 : tcp_connection_t *tmp;
2092 521 : tmp = tcp_lookup_connection (tc->c_fib_index, b[0], thread_index,
2093 : is_ip4);
2094 521 : if (tmp->state != tc->state)
2095 : {
2096 0 : if (tc->state != TCP_STATE_CLOSED)
2097 0 : clib_warning ("state changed");
2098 0 : goto drop;
2099 : }
2100 : }
2101 : }
2102 :
2103 : /*
2104 : * Special treatment for CLOSED
2105 : */
2106 521 : if (PREDICT_FALSE (tc->state == TCP_STATE_CLOSED))
2107 : {
2108 0 : error = TCP_ERROR_CONNECTION_CLOSED;
2109 0 : goto drop;
2110 : }
2111 :
2112 : /*
2113 : * For all other states (except LISTEN)
2114 : */
2115 :
2116 : /* 1-4: check SEQ, RST, SYN */
2117 521 : if (PREDICT_FALSE (tcp_segment_validate (wrk, tc, b[0], tcp, &error)))
2118 0 : goto drop;
2119 :
2120 : /* 5: check the ACK field */
2121 521 : switch (tc->state)
2122 : {
2123 132 : case TCP_STATE_SYN_RCVD:
2124 :
2125 : /* Make sure the segment is exactly right */
2126 132 : if (tc->rcv_nxt != vnet_buffer (b[0])->tcp.seq_number)
2127 : {
2128 0 : tcp_send_reset_w_pkt (tc, b[0], thread_index, is_ip4);
2129 0 : error = TCP_ERROR_SEGMENT_INVALID;
2130 0 : goto drop;
2131 : }
2132 :
2133 : /*
2134 : * If the segment acknowledgment is not acceptable, form a
2135 : * reset segment,
2136 : * <SEQ=SEG.ACK><CTL=RST>
2137 : * and send it.
2138 : */
2139 132 : if (tcp_rcv_ack_no_cc (tc, b[0], &error))
2140 : {
2141 0 : tcp_send_reset_w_pkt (tc, b[0], thread_index, is_ip4);
2142 0 : error = TCP_ERROR_SEGMENT_INVALID;
2143 0 : goto drop;
2144 : }
2145 :
2146 : /* Avoid notifying app if connection is about to be closed */
2147 132 : if (PREDICT_FALSE (is_fin))
2148 0 : break;
2149 :
2150 : /* Update rtt and rto */
2151 132 : tcp_estimate_initial_rtt (tc);
2152 132 : tcp_connection_tx_pacer_update (tc);
2153 :
2154 : /* Switch state to ESTABLISHED */
2155 132 : tc->state = TCP_STATE_ESTABLISHED;
2156 : TCP_EVT (TCP_EVT_STATE_CHANGE, tc);
2157 :
2158 132 : if (!(tc->cfg_flags & TCP_CFG_F_NO_TSO))
2159 0 : tcp_check_tx_offload (tc, is_ip4);
2160 :
2161 : /* Initialize session variables */
2162 132 : tc->snd_una = vnet_buffer (b[0])->tcp.ack_number;
2163 132 : tc->snd_wnd = clib_net_to_host_u16 (tcp->window)
2164 132 : << tc->rcv_opts.wscale;
2165 132 : tc->snd_wl1 = vnet_buffer (b[0])->tcp.seq_number;
2166 132 : tc->snd_wl2 = vnet_buffer (b[0])->tcp.ack_number;
2167 :
2168 : /* Reset SYN-ACK retransmit and SYN_RCV establish timers */
2169 132 : tcp_retransmit_timer_reset (&wrk->timer_wheel, tc);
2170 132 : if (session_stream_accept_notify (&tc->connection))
2171 : {
2172 0 : error = TCP_ERROR_MSG_QUEUE_FULL;
2173 0 : tcp_send_reset (tc);
2174 0 : session_transport_delete_notify (&tc->connection);
2175 0 : tcp_connection_cleanup (tc);
2176 0 : goto drop;
2177 : }
2178 132 : error = TCP_ERROR_ACK_OK;
2179 132 : break;
2180 1 : case TCP_STATE_ESTABLISHED:
2181 : /* We can get packets in established state here because they
2182 : * were enqueued before state change */
2183 1 : if (tcp_rcv_ack (wrk, tc, b[0], tcp, &error))
2184 0 : goto drop;
2185 :
2186 1 : break;
2187 131 : case TCP_STATE_FIN_WAIT_1:
2188 : /* In addition to the processing for the ESTABLISHED state, if
2189 : * our FIN is now acknowledged then enter FIN-WAIT-2 and
2190 : * continue processing in that state. */
2191 131 : if (tcp_rcv_ack (wrk, tc, b[0], tcp, &error))
2192 0 : goto drop;
2193 :
2194 : /* Still have to send the FIN */
2195 131 : if (tc->flags & TCP_CONN_FINPNDG)
2196 : {
2197 : /* TX fifo finally drained */
2198 2 : max_deq = transport_max_tx_dequeue (&tc->connection);
2199 2 : if (max_deq <= tc->burst_acked)
2200 2 : tcp_send_fin (tc);
2201 : /* If a fin was received and data was acked extend wait */
2202 0 : else if ((tc->flags & TCP_CONN_FINRCVD) && tc->bytes_acked)
2203 0 : tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_WAITCLOSE,
2204 : tcp_cfg.closewait_time);
2205 : }
2206 : /* If FIN is ACKed */
2207 129 : else if (tc->snd_una == tc->snd_nxt)
2208 : {
2209 : /* Stop all retransmit timers because we have nothing more
2210 : * to send. */
2211 127 : tcp_connection_timers_reset (tc);
2212 :
2213 : /* We already have a FIN but didn't transition to CLOSING
2214 : * because of outstanding tx data. Close the connection. */
2215 127 : if (tc->flags & TCP_CONN_FINRCVD)
2216 : {
2217 0 : tcp_connection_set_state (tc, TCP_STATE_CLOSED);
2218 0 : session_transport_closed_notify (&tc->connection);
2219 0 : tcp_program_cleanup (wrk, tc);
2220 0 : goto drop;
2221 : }
2222 :
2223 127 : tcp_connection_set_state (tc, TCP_STATE_FIN_WAIT_2);
2224 : /* Enable waitclose because we're willing to wait for peer's
2225 : * FIN but not indefinitely. */
2226 127 : tcp_timer_set (&wrk->timer_wheel, tc, TCP_TIMER_WAITCLOSE,
2227 : tcp_cfg.finwait2_time);
2228 :
2229 : /* Don't try to deq the FIN acked */
2230 127 : if (tc->burst_acked > 1)
2231 0 : session_tx_fifo_dequeue_drop (&tc->connection,
2232 0 : tc->burst_acked - 1);
2233 127 : tc->burst_acked = 0;
2234 : }
2235 131 : break;
2236 126 : case TCP_STATE_FIN_WAIT_2:
2237 : /* In addition to the processing for the ESTABLISHED state, if
2238 : * the retransmission queue is empty, the user's CLOSE can be
2239 : * acknowledged ("ok") but do not delete the TCB. */
2240 126 : if (tcp_rcv_ack_no_cc (tc, b[0], &error))
2241 0 : goto drop;
2242 126 : tc->burst_acked = 0;
2243 126 : break;
2244 2 : case TCP_STATE_CLOSE_WAIT:
2245 : /* Do the same processing as for the ESTABLISHED state. */
2246 2 : if (tcp_rcv_ack (wrk, tc, b[0], tcp, &error))
2247 0 : goto drop;
2248 :
2249 2 : if (!(tc->flags & TCP_CONN_FINPNDG))
2250 0 : break;
2251 :
2252 : /* Still have outstanding tx data */
2253 2 : max_deq = transport_max_tx_dequeue (&tc->connection);
2254 2 : if (max_deq > tc->burst_acked)
2255 0 : break;
2256 :
2257 2 : tcp_send_fin (tc);
2258 2 : tcp_connection_timers_reset (tc);
2259 2 : tcp_connection_set_state (tc, TCP_STATE_LAST_ACK);
2260 2 : tcp_timer_set (&wrk->timer_wheel, tc, TCP_TIMER_WAITCLOSE,
2261 : tcp_cfg.lastack_time);
2262 2 : break;
2263 2 : case TCP_STATE_CLOSING:
2264 : /* In addition to the processing for the ESTABLISHED state, if
2265 : * the ACK acknowledges our FIN then enter the TIME-WAIT state,
2266 : * otherwise ignore the segment. */
2267 2 : if (tcp_rcv_ack_no_cc (tc, b[0], &error))
2268 0 : goto drop;
2269 :
2270 2 : if (tc->snd_una != tc->snd_nxt)
2271 0 : goto drop;
2272 :
2273 2 : tcp_connection_timers_reset (tc);
2274 2 : tcp_connection_set_state (tc, TCP_STATE_TIME_WAIT);
2275 2 : tcp_timer_set (&wrk->timer_wheel, tc, TCP_TIMER_WAITCLOSE,
2276 : tcp_cfg.timewait_time);
2277 2 : session_transport_closed_notify (&tc->connection);
2278 2 : goto drop;
2279 :
2280 : break;
2281 127 : case TCP_STATE_LAST_ACK:
2282 : /* The only thing that [should] arrive in this state is an
2283 : * acknowledgment of our FIN. If our FIN is now acknowledged,
2284 : * delete the TCB, enter the CLOSED state, and return. */
2285 :
2286 127 : if (tcp_rcv_ack_no_cc (tc, b[0], &error))
2287 0 : goto drop;
2288 :
2289 : /* Apparently our ACK for the peer's FIN was lost */
2290 127 : if (is_fin && tc->snd_una != tc->snd_nxt)
2291 : {
2292 0 : tcp_send_fin (tc);
2293 0 : goto drop;
2294 : }
2295 :
2296 127 : tcp_connection_set_state (tc, TCP_STATE_CLOSED);
2297 127 : session_transport_closed_notify (&tc->connection);
2298 :
2299 : /* Don't free the connection from the data path since
2300 : * we can't ensure that we have no packets already enqueued
2301 : * to output. Rely instead on the waitclose timer */
2302 127 : tcp_connection_timers_reset (tc);
2303 127 : tcp_program_cleanup (tcp_get_worker (tc->c_thread_index), tc);
2304 :
2305 127 : goto drop;
2306 :
2307 : break;
2308 0 : case TCP_STATE_TIME_WAIT:
2309 : /* The only thing that can arrive in this state is a
2310 : * retransmission of the remote FIN. Acknowledge it, and restart
2311 : * the 2 MSL timeout. */
2312 :
2313 0 : if (tcp_rcv_ack_no_cc (tc, b[0], &error))
2314 0 : goto drop;
2315 :
2316 0 : if (!is_fin)
2317 0 : goto drop;
2318 :
2319 0 : tcp_program_ack (tc);
2320 0 : tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_WAITCLOSE,
2321 : tcp_cfg.timewait_time);
2322 0 : goto drop;
2323 :
2324 : break;
2325 0 : default:
2326 0 : ASSERT (0);
2327 : }
2328 :
2329 : /* 6: check the URG bit TODO */
2330 :
2331 : /* 7: process the segment text */
2332 392 : switch (tc->state)
2333 : {
2334 390 : case TCP_STATE_ESTABLISHED:
2335 : case TCP_STATE_FIN_WAIT_1:
2336 : case TCP_STATE_FIN_WAIT_2:
2337 390 : if (vnet_buffer (b[0])->tcp.data_len)
2338 3 : error = tcp_segment_rcv (wrk, tc, b[0]);
2339 : /* Don't accept out of order fins lower */
2340 390 : if (vnet_buffer (b[0])->tcp.seq_end != tc->rcv_nxt)
2341 0 : goto drop;
2342 390 : break;
2343 2 : case TCP_STATE_CLOSE_WAIT:
2344 : case TCP_STATE_CLOSING:
2345 : case TCP_STATE_LAST_ACK:
2346 : case TCP_STATE_TIME_WAIT:
2347 : /* This should not occur, since a FIN has been received from the
2348 : * remote side. Ignore the segment text. */
2349 2 : break;
2350 : }
2351 :
2352 : /* 8: check the FIN bit */
2353 392 : if (!is_fin)
2354 263 : goto drop;
2355 :
2356 : TCP_EVT (TCP_EVT_FIN_RCVD, tc);
2357 :
2358 129 : switch (tc->state)
2359 : {
2360 0 : case TCP_STATE_ESTABLISHED:
2361 : /* Account for the FIN and send ack */
2362 0 : tc->rcv_nxt += 1;
2363 0 : tcp_program_ack (tc);
2364 0 : tcp_connection_set_state (tc, TCP_STATE_CLOSE_WAIT);
2365 0 : tcp_program_disconnect (wrk, tc);
2366 0 : tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_WAITCLOSE,
2367 : tcp_cfg.closewait_time);
2368 0 : break;
2369 0 : case TCP_STATE_SYN_RCVD:
2370 : /* Send FIN-ACK and enter TIME-WAIT, as opposed to LAST-ACK,
2371 : * because the app was not notified yet and we want to avoid
2372 : * session state transitions to ensure cleanup does not
2373 : * propagate to app. */
2374 0 : tcp_connection_timers_reset (tc);
2375 0 : tc->rcv_nxt += 1;
2376 0 : tcp_send_fin (tc);
2377 0 : tcp_connection_set_state (tc, TCP_STATE_TIME_WAIT);
2378 0 : tcp_program_cleanup (wrk, tc);
2379 0 : break;
2380 0 : case TCP_STATE_CLOSE_WAIT:
2381 : case TCP_STATE_CLOSING:
2382 : case TCP_STATE_LAST_ACK:
2383 : /* move along .. */
2384 0 : break;
2385 2 : case TCP_STATE_FIN_WAIT_1:
2386 2 : tc->rcv_nxt += 1;
2387 :
2388 2 : if (tc->flags & TCP_CONN_FINPNDG)
2389 : {
2390 : /* If data is outstanding, stay in FIN_WAIT_1 and try to finish
2391 : * sending it. Since we already received a fin, do not wait
2392 : * for too long. */
2393 0 : tc->flags |= TCP_CONN_FINRCVD;
2394 0 : tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_WAITCLOSE,
2395 : tcp_cfg.closewait_time);
2396 : }
2397 : else
2398 : {
2399 2 : tcp_connection_set_state (tc, TCP_STATE_CLOSING);
2400 2 : tcp_program_ack (tc);
2401 : /* Wait for ACK for our FIN but not forever */
2402 2 : tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_WAITCLOSE,
2403 : tcp_cfg.closing_time);
2404 : }
2405 2 : break;
2406 127 : case TCP_STATE_FIN_WAIT_2:
2407 : /* Got FIN, send ACK! Be more aggressive with resource cleanup */
2408 127 : tc->rcv_nxt += 1;
2409 127 : tcp_connection_set_state (tc, TCP_STATE_TIME_WAIT);
2410 127 : tcp_connection_timers_reset (tc);
2411 127 : tcp_timer_set (&wrk->timer_wheel, tc, TCP_TIMER_WAITCLOSE,
2412 : tcp_cfg.timewait_time);
2413 127 : tcp_program_ack (tc);
2414 127 : session_transport_closed_notify (&tc->connection);
2415 127 : break;
2416 0 : case TCP_STATE_TIME_WAIT:
2417 : /* Remain in the TIME-WAIT state. Restart the time-wait
2418 : * timeout.
2419 : */
2420 0 : tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_WAITCLOSE,
2421 : tcp_cfg.timewait_time);
2422 0 : break;
2423 : }
2424 129 : error = TCP_ERROR_FIN_RCVD;
2425 :
2426 521 : drop:
2427 :
2428 521 : b += 1;
2429 521 : n_left_from -= 1;
2430 521 : tcp_inc_counter (rcv_process, error, 1);
2431 : }
2432 :
2433 106 : session_main_flush_enqueue_events (TRANSPORT_PROTO_TCP, thread_index);
2434 106 : tcp_handle_postponed_dequeues (wrk);
2435 106 : tcp_handle_disconnects (wrk);
2436 106 : vlib_buffer_free (vm, from, frame->n_vectors);
2437 :
2438 106 : return frame->n_vectors;
2439 : }
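/*
 * Two pieces of sequence accounting drive several transitions above: a
 * FIN occupies one sequence number (hence the rcv_nxt += 1 on receipt),
 * and "our FIN was ACKed" reduces to snd_una catching up with snd_nxt
 * once the FIN has been counted into snd_nxt. A tiny sketch with
 * stand-in variables:
 */
#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  uint32_t snd_una = 5000, snd_nxt = 5000;	/* all data acknowledged */

  snd_nxt += 1;					/* we send a FIN */
  printf ("fin acked? %d\n", snd_una == snd_nxt);	/* 0: not yet */

  snd_una = 5001;			/* peer ACKs 5001, covering the FIN */
  printf ("fin acked? %d\n", snd_una == snd_nxt);	/* 1 */
  return 0;
}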
2440 :
2441 2402 : VLIB_NODE_FN (tcp4_rcv_process_node) (vlib_main_t * vm,
2442 : vlib_node_runtime_t * node,
2443 : vlib_frame_t * from_frame)
2444 : {
2445 102 : return tcp46_rcv_process_inline (vm, node, from_frame, 1 /* is_ip4 */ );
2446 : }
2447 :
2448 2304 : VLIB_NODE_FN (tcp6_rcv_process_node) (vlib_main_t * vm,
2449 : vlib_node_runtime_t * node,
2450 : vlib_frame_t * from_frame)
2451 : {
2452 4 : return tcp46_rcv_process_inline (vm, node, from_frame, 0 /* is_ip4 */ );
2453 : }
2454 :
2455 : /* *INDENT-OFF* */
2456 183788 : VLIB_REGISTER_NODE (tcp4_rcv_process_node) = {
2457 : .name = "tcp4-rcv-process",
2458 : /* Takes a vector of packets. */
2459 : .vector_size = sizeof (u32),
2460 : .n_errors = TCP_N_ERROR,
2461 : .error_counters = tcp_input_error_counters,
2462 : .format_trace = format_tcp_rx_trace_short,
2463 : };
2464 : /* *INDENT-ON* */
2465 :
2466 : /* *INDENT-OFF* */
2467 183788 : VLIB_REGISTER_NODE (tcp6_rcv_process_node) = {
2468 : .name = "tcp6-rcv-process",
2469 : /* Takes a vector of packets. */
2470 : .vector_size = sizeof (u32),
2471 : .n_errors = TCP_N_ERROR,
2472 : .error_counters = tcp_input_error_counters,
2473 : .format_trace = format_tcp_rx_trace_short,
2474 : };
2475 : /* *INDENT-ON* */
2476 :
2477 : static void
2478 3 : tcp46_listen_trace_frame (vlib_main_t *vm, vlib_node_runtime_t *node,
2479 : u32 *to_next, u32 n_bufs)
2480 : {
2481 3 : tcp_connection_t *tc = 0;
2482 : tcp_rx_trace_t *t;
2483 : vlib_buffer_t *b;
2484 : int i;
2485 :
2486 6 : for (i = 0; i < n_bufs; i++)
2487 : {
2488 3 : b = vlib_get_buffer (vm, to_next[i]);
2489 3 : if (!(b->flags & VLIB_BUFFER_IS_TRACED))
2490 0 : continue;
2491 3 : if (vnet_buffer (b)->tcp.flags == TCP_STATE_LISTEN)
2492 3 : tc = tcp_listener_get (vnet_buffer (b)->tcp.connection_index);
2493 3 : t = vlib_add_trace (vm, node, b, sizeof (*t));
2494 3 : tcp_set_rx_trace_data (t, tc, tcp_buffer_hdr (b), b, 1);
2495 : }
2496 3 : }
2497 :
2498 : /**
2499 : * SYN received in TIME-WAIT state.
2500 : *
2501 : * RFC 1122:
2502 : * "When a connection is [...] in TIME-WAIT state [...]
2503 : * [a TCP] MAY accept a new SYN from the remote TCP to
2504 : * reopen the connection directly, if it:
2505 : *
2506 : * (1) assigns its initial sequence number for the new
2507 : * connection to be larger than the largest sequence
2508 : * number it used on the previous connection incarnation,
2509 : * and
2510 : *
2511 : * (2) returns to TIME-WAIT state if the SYN turns out
2512 : * to be an old duplicate".
2513 : *
2514 : * The function returns true if the syn can be accepted during
2515 : * connection time-wait (port reuse). In this case the function
2516 : * also calculates what the iss should be for the new connection.
2517 : */
2518 : always_inline int
2519 0 : syn_during_timewait (tcp_connection_t *tc, vlib_buffer_t *b, u32 *iss)
2520 : {
2521 0 : int paws_reject = tcp_segment_check_paws (tc);
2522 : u32 tw_iss;
2523 :
2524 0 : *iss = 0;
2525 : /* Check that the SYN arrived out of window. We accept it */
2526 0 : if (!paws_reject &&
2527 0 : (seq_geq (vnet_buffer (b)->tcp.seq_number, tc->rcv_nxt) ||
2528 0 : (tcp_opts_tstamp (&tc->rcv_opts) &&
2529 0 : timestamp_lt (tc->tsval_recent, tc->rcv_opts.tsval))))
2530 : {
2531 : /* Set the iss of the new connection to be the largest sequence number
2532 : * the old peer would have accepted and add some random number
2533 : */
2534 0 : tw_iss = tc->snd_nxt + tcp_available_snd_wnd (tc) +
2535 0 : (uword) (tcp_time_now_us (tc->c_thread_index) * 1e6) % 65535;
2536 0 : if (tw_iss == 0)
2537 0 : tw_iss++;
2538 0 : *iss = tw_iss;
2539 :
2540 0 : return 1;
2541 : }
2542 : else
2543 : {
2544 : TCP_DBG (
2545 : "ERROR not accepting SYN in timewait,paws_reject=%d, seq_num =%ld, "
2546 : "rcv_nxt=%ld, tstamp_present=%d, tsval_recent = %d, tsval = %d\n",
2547 : paws_reject, vnet_buffer (b)->tcp.seq_number, tc->rcv_nxt,
2548 : tcp_opts_tstamp (&tc->rcv_opts), tc->tsval_recent, tc->rcv_opts.tsval);
2549 0 : return 0;
2550 : }
2551 : }
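/*
 * A standalone sketch of the time-wait ISS selection performed by
 * syn_during_timewait() above: start past anything the old incarnation
 * could still have outstanding, then add a small time-derived offset.
 * old_snd_nxt, avail_wnd and now_us are placeholder inputs standing in
 * for tc->snd_nxt, tcp_available_snd_wnd() and tcp_time_now_us().
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t
demo_timewait_iss (uint32_t old_snd_nxt, uint32_t avail_wnd, double now_us)
{
  uint32_t iss;

  /* largest sequence number the old peer would have accepted, plus an
   * offset derived from the current time (modulo keeps it small) */
  iss = old_snd_nxt + avail_wnd + (uint64_t) (now_us * 1e6) % 65535;
  if (iss == 0)
    iss++;			/* 0 means "no override" to the caller */
  return iss;
}

int
main (void)
{
  printf ("candidate iss: %u\n", demo_timewait_iss (1000, 65535, 12.345678));
  return 0;
}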
2552 :
2553 : /**
2554 : * LISTEN state processing as per RFC 793 p. 65
2555 : */
2556 : always_inline uword
2557 36 : tcp46_listen_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
2558 : vlib_frame_t *frame, int is_ip4)
2559 : {
2560 36 : u32 n_left_from, *from, n_syns = 0;
2561 : vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
2562 36 : u32 thread_index = vm->thread_index;
2563 36 : u32 tw_iss = 0;
2564 :
2565 36 : from = vlib_frame_vector_args (frame);
2566 36 : n_left_from = frame->n_vectors;
2567 :
2568 36 : if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
2569 3 : tcp46_listen_trace_frame (vm, node, from, n_left_from);
2570 :
2571 36 : vlib_get_buffers (vm, from, bufs, n_left_from);
2572 36 : b = bufs;
2573 :
2574 171 : while (n_left_from > 0)
2575 : {
2576 : tcp_connection_t *lc, *child;
2577 :
2578 : /* Flags initialized with connection state after lookup */
2579 135 : if (vnet_buffer (b[0])->tcp.flags == TCP_STATE_LISTEN)
2580 : {
2581 135 : lc = tcp_listener_get (vnet_buffer (b[0])->tcp.connection_index);
2582 : }
2583 : /* Probably we are in time-wait or closed state */
2584 : else
2585 : {
2586 : tcp_connection_t *tc;
2587 0 : tc = tcp_connection_get (vnet_buffer (b[0])->tcp.connection_index,
2588 : thread_index);
2589 0 : if (tc->state != TCP_STATE_TIME_WAIT)
2590 : {
2591 0 : tcp_inc_counter (listen, TCP_ERROR_CREATE_EXISTS, 1);
2592 0 : goto done;
2593 : }
2594 :
2595 0 : if (PREDICT_FALSE (!syn_during_timewait (tc, b[0], &tw_iss)))
2596 : {
2597 : /* This SYN can't be accepted */
2598 0 : tcp_inc_counter (listen, TCP_ERROR_CREATE_EXISTS, 1);
2599 0 : goto done;
2600 : }
2601 :
2602 0 : lc = tcp_lookup_listener (b[0], tc->c_fib_index, is_ip4);
2603 : /* clean up the old session */
2604 0 : tcp_connection_del (tc);
2605 : /* listener was cleaned up */
2606 0 : if (!lc)
2607 : {
2608 0 : tcp_inc_counter (listen, TCP_ERROR_NO_LISTENER, 1);
2609 0 : goto done;
2610 : }
2611 : }
2612 :
2613 : /* Make sure connection wasn't just created */
2614 : child =
2615 135 : tcp_lookup_connection (lc->c_fib_index, b[0], thread_index, is_ip4);
2616 135 : if (PREDICT_FALSE (child->state != TCP_STATE_LISTEN))
2617 : {
2618 0 : tcp_inc_counter (listen, TCP_ERROR_CREATE_EXISTS, 1);
2619 0 : goto done;
2620 : }
2621 :
2622 : /* Create child session. For syn-flood protection use filter */
2623 :
2624 : /* 1. first check for an RST: handled by input dispatch */
2625 :
2626 : /* 2. second check for an ACK: handled by input dispatch */
2627 :
2628 : /* 3. check for a SYN (did that already) */
2629 :
2630 : /* Create child session and send SYN-ACK */
2631 135 : child = tcp_connection_alloc (thread_index);
2632 :
2633 135 : if (tcp_options_parse (tcp_buffer_hdr (b[0]), &child->rcv_opts, 1))
2634 : {
2635 0 : tcp_inc_counter (listen, TCP_ERROR_OPTIONS, 1);
2636 0 : tcp_connection_free (child);
2637 0 : goto done;
2638 : }
2639 :
2640 135 : tcp_init_w_buffer (child, b[0], is_ip4);
2641 :
2642 135 : child->state = TCP_STATE_SYN_RCVD;
2643 135 : child->c_fib_index = lc->c_fib_index;
2644 135 : child->cc_algo = lc->cc_algo;
2645 :
2646 : /* In the regular case, tw_iss will be zero, but in the special
2647 : * case of a SYN arriving in TIME-WAIT state, the value will have
2648 : * been set according to RFC 1122.
2649 : */
2650 135 : child->iss = tw_iss;
2651 135 : tcp_connection_init_vars (child);
2652 135 : child->rto = TCP_RTO_MIN;
2653 :
2654 : /*
2655 : * This initializes elog track, must be done before synack.
2656 : * We also do it before possible tcp_connection_cleanup() as it
2657 : * generates TCP_EVT_DELETE event.
2658 : */
2659 : TCP_EVT (TCP_EVT_SYN_RCVD, child, 1);
2660 :
2661 135 : if (session_stream_accept (&child->connection, lc->c_s_index,
2662 : lc->c_thread_index, 0 /* notify */ ))
2663 : {
2664 0 : tcp_connection_cleanup (child);
2665 0 : tcp_inc_counter (listen, TCP_ERROR_CREATE_SESSION_FAIL, 1);
2666 0 : goto done;
2667 : }
2668 :
2669 135 : transport_fifos_init_ooo (&child->connection);
2670 135 : child->tx_fifo_size = transport_tx_fifo_size (&child->connection);
2671 :
2672 135 : tcp_send_synack (child);
2673 135 : n_syns += 1;
2674 :
2675 135 : done:
2676 135 : b += 1;
2677 135 : n_left_from -= 1;
2678 : }
2679 :
2680 36 : tcp_inc_counter (listen, TCP_ERROR_SYNS_RCVD, n_syns);
2681 36 : vlib_buffer_free (vm, from, frame->n_vectors);
2682 :
2683 36 : return frame->n_vectors;
2684 : }
2685 :
2686 2335 : VLIB_NODE_FN (tcp4_listen_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
2687 : vlib_frame_t * from_frame)
2688 : {
2689 35 : return tcp46_listen_inline (vm, node, from_frame, 1 /* is_ip4 */ );
2690 : }
2691 :
2692 2301 : VLIB_NODE_FN (tcp6_listen_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
2693 : vlib_frame_t * from_frame)
2694 : {
2695 1 : return tcp46_listen_inline (vm, node, from_frame, 0 /* is_ip4 */ );
2696 : }
2697 :
2698 : /* *INDENT-OFF* */
2699 183788 : VLIB_REGISTER_NODE (tcp4_listen_node) = {
2700 : .name = "tcp4-listen",
2701 : /* Takes a vector of packets. */
2702 : .vector_size = sizeof (u32),
2703 : .n_errors = TCP_N_ERROR,
2704 : .error_counters = tcp_input_error_counters,
2705 : .format_trace = format_tcp_rx_trace_short,
2706 : };
2707 : /* *INDENT-ON* */
2708 :
2709 : /* *INDENT-OFF* */
2710 183788 : VLIB_REGISTER_NODE (tcp6_listen_node) = {
2711 : .name = "tcp6-listen",
2712 : /* Takes a vector of packets. */
2713 : .vector_size = sizeof (u32),
2714 : .n_errors = TCP_N_ERROR,
2715 : .error_counters = tcp_input_error_counters,
2716 : .format_trace = format_tcp_rx_trace_short,
2717 : };
2718 : /* *INDENT-ON* */
2719 :
2720 : always_inline uword
2721 4 : tcp46_drop_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
2722 : vlib_frame_t *frame, int is_ip4)
2723 : {
2724 4 : u32 *from = vlib_frame_vector_args (frame);
2725 :
2726 : /* Error counters must be incremented by previous nodes */
2727 4 : vlib_buffer_free (vm, from, frame->n_vectors);
2728 :
2729 4 : return frame->n_vectors;
2730 : }
2731 :
2732 2304 : VLIB_NODE_FN (tcp4_drop_node)
2733 : (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame)
2734 : {
2735 4 : return tcp46_drop_inline (vm, node, from_frame, 1 /* is_ip4 */);
2736 : }
2737 :
2738 2300 : VLIB_NODE_FN (tcp6_drop_node)
2739 : (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame)
2740 : {
2741 0 : return tcp46_drop_inline (vm, node, from_frame, 0 /* is_ip4 */);
2742 : }
2743 :
2744 183788 : VLIB_REGISTER_NODE (tcp4_drop_node) = {
2745 : .name = "tcp4-drop",
2746 : .vector_size = sizeof (u32),
2747 : .n_errors = TCP_N_ERROR,
2748 : .error_counters = tcp_input_error_counters,
2749 : };
2750 :
2751 183788 : VLIB_REGISTER_NODE (tcp6_drop_node) = {
2752 : .name = "tcp6-drop",
2753 : .vector_size = sizeof (u32),
2754 : .n_errors = TCP_N_ERROR,
2755 : .error_counters = tcp_input_error_counters,
2756 : };
2757 :
2758 : #define foreach_tcp4_input_next \
2759 : _ (DROP, "tcp4-drop") \
2760 : _ (LISTEN, "tcp4-listen") \
2761 : _ (RCV_PROCESS, "tcp4-rcv-process") \
2762 : _ (SYN_SENT, "tcp4-syn-sent") \
2763 : _ (ESTABLISHED, "tcp4-established") \
2764 : _ (RESET, "tcp4-reset") \
2765 : _ (PUNT, "ip4-punt")
2766 :
2767 : #define foreach_tcp6_input_next \
2768 : _ (DROP, "tcp6-drop") \
2769 : _ (LISTEN, "tcp6-listen") \
2770 : _ (RCV_PROCESS, "tcp6-rcv-process") \
2771 : _ (SYN_SENT, "tcp6-syn-sent") \
2772 : _ (ESTABLISHED, "tcp6-established") \
2773 : _ (RESET, "tcp6-reset") \
2774 : _ (PUNT, "ip6-punt")
2775 :
2776 : #define filter_flags (TCP_FLAG_SYN|TCP_FLAG_ACK|TCP_FLAG_RST|TCP_FLAG_FIN)
2777 :
2778 : static void
2779 0 : tcp_input_set_error_next (tcp_main_t * tm, u16 * next, u32 * error, u8 is_ip4)
2780 : {
2781 0 : if (*error == TCP_ERROR_FILTERED || *error == TCP_ERROR_WRONG_THREAD)
2782 : {
2783 0 : *next = TCP_INPUT_NEXT_DROP;
2784 : }
2785 0 : else if ((is_ip4 && tm->punt_unknown4) || (!is_ip4 && tm->punt_unknown6))
2786 : {
2787 0 : *next = TCP_INPUT_NEXT_PUNT;
2788 0 : *error = TCP_ERROR_PUNT;
2789 : }
2790 : else
2791 : {
2792 0 : *next = TCP_INPUT_NEXT_RESET;
2793 0 : *error = TCP_ERROR_NO_LISTENER;
2794 : }
2795 0 : }
2796 :
2797 : static inline void
2798 1062520 : tcp_input_dispatch_buffer (tcp_main_t *tm, tcp_connection_t *tc,
2799 : vlib_buffer_t *b, u16 *next, u16 *err_counters)
2800 : {
2801 : tcp_header_t *tcp;
2802 : u32 error;
2803 : u8 flags;
2804 :
2805 1062520 : tcp = tcp_buffer_hdr (b);
2806 1062520 : flags = tcp->flags & filter_flags;
2807 1062520 : *next = tm->dispatch_table[tc->state][flags].next;
2808 1062520 : error = tm->dispatch_table[tc->state][flags].error;
2809 1062520 : tc->segs_in += 1;
2810 :
2811 : /* Track connection state when packet was received. It is required
2812 : * for @ref tcp46_listen_inline to detect whether we reached
2813 : * the node as a result of a SYN packet received while in time-wait
2814 : * state. In this case the connection_index in vnet buffer will point
2815 : * to the existing tcp connection and not the listener
2816 : */
2817 1062520 : vnet_buffer (b)->tcp.flags = tc->state;
2818 :
2819 1062520 : if (PREDICT_FALSE (error != TCP_ERROR_NONE))
2820 : {
2821 174 : tcp_inc_err_counter (err_counters, error, 1);
2822 174 : if (error == TCP_ERROR_DISPATCH)
2823 0 : clib_warning ("tcp conn %u disp error state %U flags %U",
2824 : tc->c_c_index, format_tcp_state, tc->state,
2825 : format_tcp_flags, (int) flags);
2826 : }
2827 1062520 : }
2828 :
2829 : always_inline uword
2830 38166 : tcp46_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
2831 : vlib_frame_t * frame, int is_ip4, u8 is_nolookup)
2832 : {
2833 38166 : u32 n_left_from, *from, thread_index = vm->thread_index;
2834 38166 : tcp_main_t *tm = vnet_get_tcp_main ();
2835 : vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
2836 : u16 nexts[VLIB_FRAME_SIZE], *next;
2837 38166 : u16 err_counters[TCP_N_ERROR] = { 0 };
2838 :
2839 38166 : tcp_update_time_now (tcp_get_worker (thread_index));
2840 :
2841 38166 : from = vlib_frame_vector_args (frame);
2842 38166 : n_left_from = frame->n_vectors;
2843 38166 : vlib_get_buffers (vm, from, bufs, n_left_from);
2844 :
2845 38166 : b = bufs;
2846 38166 : next = nexts;
2847 :
2848 534869 : while (n_left_from >= 4)
2849 : {
2850 496703 : u32 error0 = TCP_ERROR_NO_LISTENER, error1 = TCP_ERROR_NO_LISTENER;
2851 : tcp_connection_t *tc0, *tc1;
2852 :
2853 : {
2854 496703 : vlib_prefetch_buffer_header (b[2], STORE);
2855 496703 : CLIB_PREFETCH (b[2]->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
2856 :
2857 496703 : vlib_prefetch_buffer_header (b[3], STORE);
2858 496703 : CLIB_PREFETCH (b[3]->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
2859 : }
2860 :
2861 496703 : next[0] = next[1] = TCP_INPUT_NEXT_DROP;
2862 :
2863 496703 : tc0 = tcp_input_lookup_buffer (b[0], thread_index, &error0, is_ip4,
2864 : is_nolookup);
2865 496703 : tc1 = tcp_input_lookup_buffer (b[1], thread_index, &error1, is_ip4,
2866 : is_nolookup);
2867 :
2868 496703 : if (PREDICT_TRUE (!tc0 + !tc1 == 0))
2869 : {
2870 496703 : ASSERT (tcp_lookup_is_valid (tc0, b[0], tcp_buffer_hdr (b[0])));
2871 496703 : ASSERT (tcp_lookup_is_valid (tc1, b[1], tcp_buffer_hdr (b[1])));
2872 :
2873 496703 : vnet_buffer (b[0])->tcp.connection_index = tc0->c_c_index;
2874 496703 : vnet_buffer (b[1])->tcp.connection_index = tc1->c_c_index;
2875 :
2876 496703 : tcp_input_dispatch_buffer (tm, tc0, b[0], &next[0], err_counters);
2877 496703 : tcp_input_dispatch_buffer (tm, tc1, b[1], &next[1], err_counters);
2878 : }
2879 : else
2880 : {
2881 0 : if (PREDICT_TRUE (tc0 != 0))
2882 : {
2883 0 : ASSERT (tcp_lookup_is_valid (tc0, b[0], tcp_buffer_hdr (b[0])));
2884 0 : vnet_buffer (b[0])->tcp.connection_index = tc0->c_c_index;
2885 0 : tcp_input_dispatch_buffer (tm, tc0, b[0], &next[0],
2886 : err_counters);
2887 : }
2888 : else
2889 : {
2890 0 : tcp_input_set_error_next (tm, &next[0], &error0, is_ip4);
2891 0 : tcp_inc_err_counter (err_counters, error0, 1);
2892 : }
2893 :
2894 0 : if (PREDICT_TRUE (tc1 != 0))
2895 : {
2896 0 : ASSERT (tcp_lookup_is_valid (tc1, b[1], tcp_buffer_hdr (b[1])));
2897 0 : vnet_buffer (b[1])->tcp.connection_index = tc1->c_c_index;
2898 0 : tcp_input_dispatch_buffer (tm, tc1, b[1], &next[1],
2899 : err_counters);
2900 : }
2901 : else
2902 : {
2903 0 : tcp_input_set_error_next (tm, &next[1], &error1, is_ip4);
2904 0 : tcp_inc_err_counter (err_counters, error1, 1);
2905 : }
2906 : }
2907 :
2908 496703 : b += 2;
2909 496703 : next += 2;
2910 496703 : n_left_from -= 2;
2911 : }
2912 107275 : while (n_left_from > 0)
2913 : {
2914 : tcp_connection_t *tc0;
2915 69109 : u32 error0 = TCP_ERROR_NO_LISTENER;
2916 :
2917 69109 : if (n_left_from > 1)
2918 : {
2919 30943 : vlib_prefetch_buffer_header (b[1], STORE);
2920 30943 : CLIB_PREFETCH (b[1]->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
2921 : }
2922 :
2923 69109 : next[0] = TCP_INPUT_NEXT_DROP;
2924 69109 : tc0 = tcp_input_lookup_buffer (b[0], thread_index, &error0, is_ip4,
2925 : is_nolookup);
2926 69109 : if (PREDICT_TRUE (tc0 != 0))
2927 : {
2928 69109 : ASSERT (tcp_lookup_is_valid (tc0, b[0], tcp_buffer_hdr (b[0])));
2929 69109 : vnet_buffer (b[0])->tcp.connection_index = tc0->c_c_index;
2930 69109 : tcp_input_dispatch_buffer (tm, tc0, b[0], &next[0], err_counters);
2931 : }
2932 : else
2933 : {
2934 0 : tcp_input_set_error_next (tm, &next[0], &error0, is_ip4);
2935 0 : tcp_inc_err_counter (err_counters, error0, 1);
2936 : }
2937 :
2938 69109 : b += 1;
2939 69109 : next += 1;
2940 69109 : n_left_from -= 1;
2941 : }
2942 :
2943 38166 : if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
2944 3 : tcp_input_trace_frame (vm, node, bufs, nexts, frame->n_vectors, is_ip4);
2945 :
2946 1450310 : tcp_store_err_counters (input, err_counters);
2947 38166 : vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
2948 38166 : return frame->n_vectors;
2949 : }
2950 :
2951 2300 : VLIB_NODE_FN (tcp4_input_nolookup_node) (vlib_main_t * vm,
2952 : vlib_node_runtime_t * node,
2953 : vlib_frame_t * from_frame)
2954 : {
2955 0 : return tcp46_input_inline (vm, node, from_frame, 1 /* is_ip4 */ ,
2956 : 1 /* is_nolookup */ );
2957 : }
2958 :
2959 2300 : VLIB_NODE_FN (tcp6_input_nolookup_node) (vlib_main_t * vm,
2960 : vlib_node_runtime_t * node,
2961 : vlib_frame_t * from_frame)
2962 : {
2963 0 : return tcp46_input_inline (vm, node, from_frame, 0 /* is_ip4 */ ,
2964 : 1 /* is_nolookup */ );
2965 : }
2966 :
2967 : /* *INDENT-OFF* */
2968 183788 : VLIB_REGISTER_NODE (tcp4_input_nolookup_node) =
2969 : {
2970 : .name = "tcp4-input-nolookup",
2971 : /* Takes a vector of packets. */
2972 : .vector_size = sizeof (u32),
2973 : .n_errors = TCP_N_ERROR,
2974 : .error_counters = tcp_input_error_counters,
2975 : .n_next_nodes = TCP_INPUT_N_NEXT,
2976 : .next_nodes =
2977 : {
2978 : #define _(s,n) [TCP_INPUT_NEXT_##s] = n,
2979 : foreach_tcp4_input_next
2980 : #undef _
2981 : },
2982 : .format_buffer = format_tcp_header,
2983 : .format_trace = format_tcp_rx_trace,
2984 : };
2985 : /* *INDENT-ON* */
2986 :
2987 : /* *INDENT-OFF* */
2988 183788 : VLIB_REGISTER_NODE (tcp6_input_nolookup_node) =
2989 : {
2990 : .name = "tcp6-input-nolookup",
2991 : /* Takes a vector of packets. */
2992 : .vector_size = sizeof (u32),
2993 : .n_errors = TCP_N_ERROR,
2994 : .error_counters = tcp_input_error_counters,
2995 : .n_next_nodes = TCP_INPUT_N_NEXT,
2996 : .next_nodes =
2997 : {
2998 : #define _(s,n) [TCP_INPUT_NEXT_##s] = n,
2999 : foreach_tcp6_input_next
3000 : #undef _
3001 : },
3002 : .format_buffer = format_tcp_header,
3003 : .format_trace = format_tcp_rx_trace,
3004 : };
3005 : /* *INDENT-ON* */
3006 :
3007 40449 : VLIB_NODE_FN (tcp4_input_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
3008 : vlib_frame_t * from_frame)
3009 : {
3010 38149 : return tcp46_input_inline (vm, node, from_frame, 1 /* is_ip4 */ ,
3011 : 0 /* is_nolookup */ );
3012 : }
3013 :
3014 2317 : VLIB_NODE_FN (tcp6_input_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
3015 : vlib_frame_t * from_frame)
3016 : {
3017 17 : return tcp46_input_inline (vm, node, from_frame, 0 /* is_ip4 */ ,
3018 : 0 /* is_nolookup */ );
3019 : }
3020 :
3021 : /* *INDENT-OFF* */
3022 183788 : VLIB_REGISTER_NODE (tcp4_input_node) =
3023 : {
3024 : .name = "tcp4-input",
3025 : /* Takes a vector of packets. */
3026 : .vector_size = sizeof (u32),
3027 : .n_errors = TCP_N_ERROR,
3028 : .error_counters = tcp_input_error_counters,
3029 : .n_next_nodes = TCP_INPUT_N_NEXT,
3030 : .next_nodes =
3031 : {
3032 : #define _(s,n) [TCP_INPUT_NEXT_##s] = n,
3033 : foreach_tcp4_input_next
3034 : #undef _
3035 : },
3036 : .format_buffer = format_tcp_header,
3037 : .format_trace = format_tcp_rx_trace,
3038 : };
3039 : /* *INDENT-ON* */
3040 :
3041 : /* *INDENT-OFF* */
3042 183788 : VLIB_REGISTER_NODE (tcp6_input_node) =
3043 : {
3044 : .name = "tcp6-input",
3045 : /* Takes a vector of packets. */
3046 : .vector_size = sizeof (u32),
3047 : .n_errors = TCP_N_ERROR,
3048 : .error_counters = tcp_input_error_counters,
3049 : .n_next_nodes = TCP_INPUT_N_NEXT,
3050 : .next_nodes =
3051 : {
3052 : #define _(s,n) [TCP_INPUT_NEXT_##s] = n,
3053 : foreach_tcp6_input_next
3054 : #undef _
3055 : },
3056 : .format_buffer = format_tcp_header,
3057 : .format_trace = format_tcp_rx_trace,
3058 : };
3059 : /* *INDENT-ON* */
3060 :
3061 : #ifndef CLIB_MARCH_VARIANT
3062 : void
3063 0 : tcp_check_gso (tcp_connection_t *tc)
3064 : {
3065 0 : tcp_check_tx_offload (tc, tc->c_is_ip4);
3066 0 : }
3067 :
3068 : static void
3069 575 : tcp_dispatch_table_init (tcp_main_t * tm)
3070 : {
3071 : int i, j;
3072 6900 : for (i = 0; i < ARRAY_LEN (tm->dispatch_table); i++)
3073 411125 : for (j = 0; j < ARRAY_LEN (tm->dispatch_table[i]); j++)
3074 : {
3075 404800 : tm->dispatch_table[i][j].next = TCP_INPUT_NEXT_DROP;
3076 404800 : tm->dispatch_table[i][j].error = TCP_ERROR_DISPATCH;
3077 : }
3078 :
3079 : #define _(t,f,n,e) \
3080 : do { \
3081 : tm->dispatch_table[TCP_STATE_##t][f].next = (n); \
3082 : tm->dispatch_table[TCP_STATE_##t][f].error = (e); \
3083 : } while (0)
3084 :
3085 : /* RFC 793: In LISTEN if RST drop and if ACK return RST */
3086 575 : _(LISTEN, 0, TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
3087 575 : _(LISTEN, TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET, TCP_ERROR_ACK_INVALID);
3088 575 : _(LISTEN, TCP_FLAG_RST, TCP_INPUT_NEXT_DROP, TCP_ERROR_INVALID_CONNECTION);
3089 575 : _(LISTEN, TCP_FLAG_SYN, TCP_INPUT_NEXT_LISTEN, TCP_ERROR_NONE);
3090 575 : _(LISTEN, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET,
3091 : TCP_ERROR_ACK_INVALID);
3092 575 : _(LISTEN, TCP_FLAG_SYN | TCP_FLAG_RST, TCP_INPUT_NEXT_DROP,
3093 : TCP_ERROR_SEGMENT_INVALID);
3094 575 : _(LISTEN, TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_DROP,
3095 : TCP_ERROR_SEGMENT_INVALID);
3096 575 : _(LISTEN, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_DROP,
3097 : TCP_ERROR_INVALID_CONNECTION);
3098 575 : _(LISTEN, TCP_FLAG_FIN, TCP_INPUT_NEXT_RESET, TCP_ERROR_SEGMENT_INVALID);
3099 575 : _(LISTEN, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET,
3100 : TCP_ERROR_SEGMENT_INVALID);
3101 575 : _(LISTEN, TCP_FLAG_FIN | TCP_FLAG_RST, TCP_INPUT_NEXT_DROP,
3102 : TCP_ERROR_SEGMENT_INVALID);
3103 575 : _(LISTEN, TCP_FLAG_FIN | TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_DROP,
3104 : TCP_ERROR_SEGMENT_INVALID);
3105 575 : _(LISTEN, TCP_FLAG_FIN | TCP_FLAG_SYN, TCP_INPUT_NEXT_DROP,
3106 : TCP_ERROR_SEGMENT_INVALID);
3107 575 : _(LISTEN, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_DROP,
3108 : TCP_ERROR_SEGMENT_INVALID);
3109 575 : _(LISTEN, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST, TCP_INPUT_NEXT_DROP,
3110 : TCP_ERROR_SEGMENT_INVALID);
3111 575 : _(LISTEN, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
3112 : TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
3113 : /* ACK for a SYN-ACK -> tcp-rcv-process. */
3114 575 : _(SYN_RCVD, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3115 575 : _(SYN_RCVD, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3116 575 : _(SYN_RCVD, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
3117 : TCP_ERROR_NONE);
3118 575 : _(SYN_RCVD, TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3119 575 : _(SYN_RCVD, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
3120 : TCP_ERROR_NONE);
3121 575 : _(SYN_RCVD, TCP_FLAG_SYN | TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS,
3122 : TCP_ERROR_NONE);
3123 575 : _(SYN_RCVD, TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
3124 : TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3125 575 : _(SYN_RCVD, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3126 575 : _(SYN_RCVD, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
3127 : TCP_ERROR_NONE);
3128 575 : _(SYN_RCVD, TCP_FLAG_FIN | TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS,
3129 : TCP_ERROR_NONE);
3130 575 : _(SYN_RCVD, TCP_FLAG_FIN | TCP_FLAG_RST | TCP_FLAG_ACK,
3131 : TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3132 575 : _(SYN_RCVD, TCP_FLAG_FIN | TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS,
3133 : TCP_ERROR_NONE);
3134 575 : _(SYN_RCVD, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST,
3135 : TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3136 575 : _(SYN_RCVD, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_ACK,
3137 : TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3138 575 : _(SYN_RCVD, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
3139 : TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3140 575 : _(SYN_RCVD, 0, TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
3141 : /* SYN-ACK for a SYN */
3142 575 : _(SYN_SENT, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_SYN_SENT,
3143 : TCP_ERROR_NONE);
3144 575 : _(SYN_SENT, TCP_FLAG_ACK, TCP_INPUT_NEXT_SYN_SENT, TCP_ERROR_NONE);
3145 575 : _(SYN_SENT, TCP_FLAG_RST, TCP_INPUT_NEXT_SYN_SENT, TCP_ERROR_NONE);
3146 575 : _(SYN_SENT, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_SYN_SENT,
3147 : TCP_ERROR_NONE);
3148 575 : _(SYN_SENT, TCP_FLAG_FIN, TCP_INPUT_NEXT_SYN_SENT, TCP_ERROR_NONE);
3149 575 : _(SYN_SENT, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_SYN_SENT,
3150 : TCP_ERROR_NONE);
3151 : /* ACK for an established connection -> tcp-established. */
3152 575 : _(ESTABLISHED, TCP_FLAG_ACK, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
3153 : /* FIN for an established connection -> tcp-established. */
3154 575 : _(ESTABLISHED, TCP_FLAG_FIN, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
3155 575 : _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_ESTABLISHED,
3156 : TCP_ERROR_NONE);
3157 575 : _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_RST, TCP_INPUT_NEXT_ESTABLISHED,
3158 : TCP_ERROR_NONE);
3159 575 : _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_RST | TCP_FLAG_ACK,
3160 : TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
3161 575 : _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_SYN, TCP_INPUT_NEXT_ESTABLISHED,
3162 : TCP_ERROR_NONE);
3163 575 : _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_ACK,
3164 : TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
3165 575 : _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST,
3166 : TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
3167 575 : _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
3168 : TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
3169 575 : _(ESTABLISHED, TCP_FLAG_RST, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
3170 575 : _(ESTABLISHED, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_ESTABLISHED,
3171 : TCP_ERROR_NONE);
3172 575 : _(ESTABLISHED, TCP_FLAG_SYN, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
3173 575 : _(ESTABLISHED, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_ESTABLISHED,
3174 : TCP_ERROR_NONE);
3175 575 : _(ESTABLISHED, TCP_FLAG_SYN | TCP_FLAG_RST, TCP_INPUT_NEXT_ESTABLISHED,
3176 : TCP_ERROR_NONE);
3177 575 : _(ESTABLISHED, TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
3178 : TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
3179 575 : _(ESTABLISHED, 0, TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
3180 : /* ACK or FIN-ACK to our FIN */
3181 575 : _(FIN_WAIT_1, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3182 575 : _(FIN_WAIT_1, TCP_FLAG_ACK | TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS,
3183 : TCP_ERROR_NONE);
3184 : /* FIN in reply to our FIN from the other side */
3185 575 : _(FIN_WAIT_1, 0, TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
3186 575 : _(FIN_WAIT_1, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3187 575 : _(FIN_WAIT_1, TCP_FLAG_FIN | TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS,
3188 : TCP_ERROR_NONE);
3189 575 : _(FIN_WAIT_1, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_ACK,
3190 : TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3191 575 : _(FIN_WAIT_1, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST,
3192 : TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3193 575 : _(FIN_WAIT_1, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
3194 : TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3195 575 : _(FIN_WAIT_1, TCP_FLAG_FIN | TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS,
3196 : TCP_ERROR_NONE);
3197 575 : _(FIN_WAIT_1, TCP_FLAG_FIN | TCP_FLAG_RST | TCP_FLAG_ACK,
3198 : TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3199 575 : _(FIN_WAIT_1, TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3200 575 : _(FIN_WAIT_1, TCP_FLAG_SYN | TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS,
3201 : TCP_ERROR_NONE);
3202 575 : _(FIN_WAIT_1, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
3203 : TCP_ERROR_NONE);
3204 575 : _(FIN_WAIT_1, TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
3205 : TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3206 575 : _(FIN_WAIT_1, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3207 575 : _(FIN_WAIT_1, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
3208 : TCP_ERROR_NONE);
3209 575 : _(CLOSING, 0, TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
3210 575 : _(CLOSING, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3211 575 : _(CLOSING, TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3212 575 : _(CLOSING, TCP_FLAG_SYN | TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS,
3213 : TCP_ERROR_NONE);
3214 575 : _(CLOSING, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
3215 : TCP_ERROR_NONE);
3216 575 : _(CLOSING, TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
3217 : TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3218 575 : _(CLOSING, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3219 575 : _(CLOSING, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
3220 : TCP_ERROR_NONE);
3221 575 : _(CLOSING, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3222 575 : _(CLOSING, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
3223 : TCP_ERROR_NONE);
3224 575 : _(CLOSING, TCP_FLAG_FIN | TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS,
3225 : TCP_ERROR_NONE);
3226 575 : _(CLOSING, TCP_FLAG_FIN | TCP_FLAG_RST | TCP_FLAG_ACK,
3227 : TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3228 575 : _(CLOSING, TCP_FLAG_FIN | TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS,
3229 : TCP_ERROR_NONE);
3230 575 : _(CLOSING, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_ACK,
3231 : TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3232 575 : _(CLOSING, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST,
3233 : TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3234 575 : _(CLOSING, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
3235 : TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3236 : /* FIN confirming that the peer (app) has closed */
3237 575 : _(FIN_WAIT_2, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3238 575 : _(FIN_WAIT_2, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3239 575 : _(FIN_WAIT_2, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
3240 : TCP_ERROR_NONE);
3241 575 : _(FIN_WAIT_2, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3242 575 : _(FIN_WAIT_2, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
3243 : TCP_ERROR_NONE);
3244 575 : _(FIN_WAIT_2, TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3245 575 : _ (FIN_WAIT_2, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
3246 : TCP_ERROR_NONE);
3247 575 : _(CLOSE_WAIT, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3248 575 : _(CLOSE_WAIT, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
3249 : TCP_ERROR_NONE);
3250 575 : _(CLOSE_WAIT, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3251 575 : _(CLOSE_WAIT, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
3252 : TCP_ERROR_NONE);
3253 575 : _(CLOSE_WAIT, TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3254 575 : _(LAST_ACK, 0, TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
3255 575 : _(LAST_ACK, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3256 575 : _(LAST_ACK, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3257 575 : _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
3258 : TCP_ERROR_NONE);
3259 575 : _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS,
3260 : TCP_ERROR_NONE);
3261 575 : _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_ACK,
3262 : TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3263 575 : _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS,
3264 : TCP_ERROR_NONE);
3265 575 : _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_RST | TCP_FLAG_ACK,
3266 : TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3267 575 : _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST,
3268 : TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3269 575 : _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
3270 : TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3271 575 : _(LAST_ACK, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3272 575 : _(LAST_ACK, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
3273 : TCP_ERROR_NONE);
3274 575 : _(LAST_ACK, TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3275 575 : _(LAST_ACK, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
3276 : TCP_ERROR_NONE);
3277 575 : _(LAST_ACK, TCP_FLAG_SYN | TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS,
3278 : TCP_ERROR_NONE);
3279 575 : _(LAST_ACK, TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
3280 : TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3281 575 : _(TIME_WAIT, TCP_FLAG_SYN, TCP_INPUT_NEXT_LISTEN, TCP_ERROR_NONE);
3282 575 : _(TIME_WAIT, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3283 575 : _(TIME_WAIT, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
3284 : TCP_ERROR_NONE);
3285 575 : _(TIME_WAIT, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3286 575 : _(TIME_WAIT, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
3287 : TCP_ERROR_NONE);
3288 575 : _(TIME_WAIT, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3289 : /* RFC793 CLOSED: An incoming segment containing a RST is discarded. An
3290 : * incoming segment not containing a RST causes a RST to be sent in
3291 : * response. */
3292 575 : _(CLOSED, TCP_FLAG_RST, TCP_INPUT_NEXT_DROP, TCP_ERROR_CONNECTION_CLOSED);
3293 575 : _(CLOSED, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_DROP,
3294 : TCP_ERROR_CONNECTION_CLOSED);
3295 575 : _(CLOSED, TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET, TCP_ERROR_CONNECTION_CLOSED);
3296 575 : _ (CLOSED, TCP_FLAG_SYN, TCP_INPUT_NEXT_LISTEN, TCP_ERROR_NONE);
3297 575 : _(CLOSED, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET,
3298 : TCP_ERROR_CONNECTION_CLOSED);
3299 : #undef _
3300 575 : }
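/*
 * The _() helper above wraps its two assignments in do { ... } while (0)
 * so each table entry expands as a single statement and composes safely
 * with un-braced if/else. A small self-contained illustration of the
 * idiom (SET_PAIR is a made-up example macro):
 */
#include <stdio.h>

#define SET_PAIR(arr, i, a, b)                                                \
  do                                                                          \
    {                                                                         \
      (arr)[i][0] = (a);                                                      \
      (arr)[i][1] = (b);                                                      \
    }                                                                         \
  while (0)

int
main (void)
{
  int t[2][2] = { { 0 } };

  if (1)
    SET_PAIR (t, 0, 1, 2);	/* expands cleanly even without braces */
  else
    SET_PAIR (t, 1, 3, 4);

  printf ("%d %d\n", t[0][0], t[0][1]);
  return 0;
}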
3301 :
3302 : static clib_error_t *
3303 575 : tcp_input_init (vlib_main_t * vm)
3304 : {
3305 575 : clib_error_t *error = 0;
3306 575 : tcp_main_t *tm = vnet_get_tcp_main ();
3307 :
3308 575 : if ((error = vlib_call_init_function (vm, tcp_init)))
3309 0 : return error;
3310 :
3311 : /* Initialize dispatch table. */
3312 575 : tcp_dispatch_table_init (tm);
3313 :
3314 575 : return error;
3315 : }
3316 :
3317 58175 : VLIB_INIT_FUNCTION (tcp_input_init);
3318 :
3319 : #endif /* CLIB_MARCH_VARIANT */
3320 :
3321 : /*
3322 : * fd.io coding-style-patch-verification: ON
3323 : *
3324 : * Local Variables:
3325 : * eval: (c-set-style "gnu")
3326 : * End:
3327 : */