/* iPXE TCP protocol implementation — src/net/tcp.c */
1 #include <string.h>
2 #include <stdlib.h>
3 #include <stdio.h>
4 #include <assert.h>
5 #include <errno.h>
6 #include <byteswap.h>
7 #include <ipxe/timer.h>
8 #include <ipxe/iobuf.h>
9 #include <ipxe/malloc.h>
10 #include <ipxe/init.h>
11 #include <ipxe/retry.h>
12 #include <ipxe/refcnt.h>
13 #include <ipxe/pending.h>
14 #include <ipxe/xfer.h>
15 #include <ipxe/open.h>
16 #include <ipxe/uri.h>
17 #include <ipxe/netdevice.h>
18 #include <ipxe/profile.h>
19 #include <ipxe/process.h>
20 #include <ipxe/tcpip.h>
21 #include <ipxe/tcp.h>
22
23 /** @file
24 *
25 * TCP protocol
26 *
27 */
28
29 FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
30
/** A TCP connection
 *
 * Holds the complete state for one connection: the RFC 793 send and
 * receive sequence variables, RFC 1323 timestamp and window-scale
 * state, the SACK list, the transmit/receive queues, and the three
 * timers (retransmission, keepalive, TIME_WAIT).
 */
struct tcp_connection {
	/** Reference counter */
	struct refcnt refcnt;
	/** List of TCP connections (entry in tcp_conns) */
	struct list_head list;

	/** Flags (bitmask of enum tcp_flags values) */
	unsigned int flags;

	/** Data transfer interface */
	struct interface xfer;

	/** Remote socket address */
	struct sockaddr_tcpip peer;
	/** Local port */
	unsigned int local_port;
	/** Maximum segment size */
	size_t mss;

	/** Current TCP state */
	unsigned int tcp_state;
	/** Previous TCP state
	 *
	 * Maintained only for debug messages
	 */
	unsigned int prev_tcp_state;
	/** Current sequence number
	 *
	 * Equivalent to SND.UNA in RFC 793 terminology.
	 */
	uint32_t snd_seq;
	/** Unacknowledged sequence count
	 *
	 * Equivalent to (SND.NXT-SND.UNA) in RFC 793 terminology.
	 */
	uint32_t snd_sent;
	/** Send window
	 *
	 * Equivalent to SND.WND in RFC 793 terminology
	 */
	uint32_t snd_win;
	/** Current acknowledgement number
	 *
	 * Equivalent to RCV.NXT in RFC 793 terminology.
	 */
	uint32_t rcv_ack;
	/** Receive window
	 *
	 * Equivalent to RCV.WND in RFC 793 terminology.
	 */
	uint32_t rcv_win;
	/** Received timestamp value
	 *
	 * Updated when a packet is received; copied to ts_recent when
	 * the window is advanced.
	 */
	uint32_t ts_val;
	/** Most recent received timestamp that advanced the window
	 *
	 * Equivalent to TS.Recent in RFC 1323 terminology.
	 */
	uint32_t ts_recent;
	/** Send window scale
	 *
	 * Equivalent to Snd.Wind.Scale in RFC 1323 terminology
	 */
	uint8_t snd_win_scale;
	/** Receive window scale
	 *
	 * Equivalent to Rcv.Wind.Scale in RFC 1323 terminology
	 */
	uint8_t rcv_win_scale;

	/** Selective acknowledgement list (in host-endian order) */
	struct tcp_sack_block sack[TCP_SACK_MAX];

	/** Transmit queue */
	struct list_head tx_queue;
	/** Receive queue (out-of-order segments awaiting delivery) */
	struct list_head rx_queue;
	/** Transmission process */
	struct process process;
	/** Retransmission timer */
	struct retry_timer timer;
	/** Keepalive timer */
	struct retry_timer keepalive;
	/** Shutdown (TIME_WAIT) timer */
	struct retry_timer wait;

	/** Pending operations for SYN and FIN */
	struct pending_operation pending_flags;
	/** Pending operations for transmit queue */
	struct pending_operation pending_data;
};
126
/** TCP flags
 *
 * Values for the flags field of struct tcp_connection.
 */
enum tcp_flags {
	/** TCP data transfer interface has been closed */
	TCP_XFER_CLOSED = 0x0001,
	/** TCP timestamps are enabled */
	TCP_TS_ENABLED = 0x0002,
	/** TCP acknowledgement is pending */
	TCP_ACK_PENDING = 0x0004,
	/** TCP selective acknowledgement is enabled */
	TCP_SACK_ENABLED = 0x0008,
};
138
/** TCP internal header
 *
 * This is the header that replaces the TCP header for packets
 * enqueued on the receive queue.
 */
struct tcp_rx_queued_header {
	/** SEQ value, in host-endian order
	 *
	 * This represents the SEQ value at the time the packet is
	 * enqueued, and so excludes the SYN, if present.
	 */
	uint32_t seq;
	/** Next SEQ value, in host-endian order */
	uint32_t nxt;
	/** Flags
	 *
	 * Only FIN is valid within this flags byte; all other flags
	 * have already been processed by the time the packet is
	 * enqueued.
	 */
	uint8_t flags;
	/** Reserved */
	uint8_t reserved[3];
};
163
/**
 * List of registered TCP connections
 */
static LIST_HEAD ( tcp_conns );

/** Transmit profiler */
static struct profiler tcp_tx_profiler __profiler = { .name = "tcp.tx" };

/** Receive profiler */
static struct profiler tcp_rx_profiler __profiler = { .name = "tcp.rx" };

/** Data transfer profiler */
static struct profiler tcp_xfer_profiler __profiler = { .name = "tcp.xfer" };

/* Forward declarations */
static struct process_descriptor tcp_process_desc;
static struct interface_descriptor tcp_xfer_desc;
static void tcp_expired ( struct retry_timer *timer, int over );
static void tcp_keepalive_expired ( struct retry_timer *timer, int over );
static void tcp_wait_expired ( struct retry_timer *timer, int over );
static struct tcp_connection * tcp_demux ( unsigned int local_port );
static int tcp_rx_ack ( struct tcp_connection *tcp, uint32_t ack,
			uint32_t win );
187
188 /**
189 * Name TCP state
190 *
191 * @v state TCP state
192 * @ret name Name of TCP state
193 */
194 static inline __attribute__ (( always_inline )) const char *
195 tcp_state ( int state ) {
196 switch ( state ) {
197 case TCP_CLOSED: return "CLOSED";
198 case TCP_LISTEN: return "LISTEN";
199 case TCP_SYN_SENT: return "SYN_SENT";
200 case TCP_SYN_RCVD: return "SYN_RCVD";
201 case TCP_ESTABLISHED: return "ESTABLISHED";
202 case TCP_FIN_WAIT_1: return "FIN_WAIT_1";
203 case TCP_FIN_WAIT_2: return "FIN_WAIT_2";
204 case TCP_CLOSING_OR_LAST_ACK: return "CLOSING/LAST_ACK";
205 case TCP_TIME_WAIT: return "TIME_WAIT";
206 case TCP_CLOSE_WAIT: return "CLOSE_WAIT";
207 default: return "INVALID";
208 }
209 }
210
211 /**
212 * Dump TCP state transition
213 *
214 * @v tcp TCP connection
215 */
216 static inline __attribute__ (( always_inline )) void
217 tcp_dump_state ( struct tcp_connection *tcp ) {
218
219 if ( tcp->tcp_state != tcp->prev_tcp_state ) {
220 DBGC ( tcp, "TCP %p transitioned from %s to %s\n", tcp,
221 tcp_state ( tcp->prev_tcp_state ),
222 tcp_state ( tcp->tcp_state ) );
223 }
224 tcp->prev_tcp_state = tcp->tcp_state;
225 }
226
227 /**
228 * Dump TCP flags
229 *
230 * @v flags TCP flags
231 */
232 static inline __attribute__ (( always_inline )) void
233 tcp_dump_flags ( struct tcp_connection *tcp, unsigned int flags ) {
234 if ( flags & TCP_RST )
235 DBGC2 ( tcp, " RST" );
236 if ( flags & TCP_SYN )
237 DBGC2 ( tcp, " SYN" );
238 if ( flags & TCP_PSH )
239 DBGC2 ( tcp, " PSH" );
240 if ( flags & TCP_FIN )
241 DBGC2 ( tcp, " FIN" );
242 if ( flags & TCP_ACK )
243 DBGC2 ( tcp, " ACK" );
244 }
245
246 /***************************************************************************
247 *
248 * Open and close
249 *
250 ***************************************************************************
251 */
252
253 /**
254 * Check if local TCP port is available
255 *
256 * @v port Local port number
257 * @ret port Local port number, or negative error
258 */
259 static int tcp_port_available ( int port ) {
260
261 return ( tcp_demux ( port ) ? -EADDRINUSE : port );
262 }
263
/**
 * Open a TCP connection
 *
 * @v xfer		Data transfer interface
 * @v peer		Peer socket address
 * @v local		Local socket address, or NULL
 * @ret rc		Return status code
 *
 * Allocates and initialises a connection, calculates the MSS from
 * the route MTU, binds to a local port, and starts the
 * retransmission timer to trigger transmission of the initial SYN.
 * On success, the connection's reference is transferred to the
 * global connection list.
 */
static int tcp_open ( struct interface *xfer, struct sockaddr *peer,
		      struct sockaddr *local ) {
	struct sockaddr_tcpip *st_peer = ( struct sockaddr_tcpip * ) peer;
	struct sockaddr_tcpip *st_local = ( struct sockaddr_tcpip * ) local;
	struct tcp_connection *tcp;
	size_t mtu;
	int port;
	int rc;

	/* Allocate and initialise structure */
	tcp = zalloc ( sizeof ( *tcp ) );
	if ( ! tcp )
		return -ENOMEM;
	DBGC ( tcp, "TCP %p allocated\n", tcp );
	ref_init ( &tcp->refcnt, NULL );
	intf_init ( &tcp->xfer, &tcp_xfer_desc, &tcp->refcnt );
	process_init_stopped ( &tcp->process, &tcp_process_desc, &tcp->refcnt );
	timer_init ( &tcp->timer, tcp_expired, &tcp->refcnt );
	timer_init ( &tcp->keepalive, tcp_keepalive_expired, &tcp->refcnt );
	timer_init ( &tcp->wait, tcp_wait_expired, &tcp->refcnt );
	tcp->prev_tcp_state = TCP_CLOSED;
	tcp->tcp_state = TCP_STATE_SENT ( TCP_SYN );
	tcp_dump_state ( tcp );
	/* Randomise the initial sequence number */
	tcp->snd_seq = random();
	INIT_LIST_HEAD ( &tcp->tx_queue );
	INIT_LIST_HEAD ( &tcp->rx_queue );
	memcpy ( &tcp->peer, st_peer, sizeof ( tcp->peer ) );

	/* Calculate MSS: route MTU minus the basic TCP header.  A
	 * zero MTU indicates no route to the peer.
	 */
	mtu = tcpip_mtu ( &tcp->peer );
	if ( ! mtu ) {
		DBGC ( tcp, "TCP %p has no route to %s\n",
		       tcp, sock_ntoa ( peer ) );
		rc = -ENETUNREACH;
		goto err;
	}
	tcp->mss = ( mtu - sizeof ( struct tcp_header ) );

	/* Bind to local port (honouring any port requested in st_local) */
	port = tcpip_bind ( st_local, tcp_port_available );
	if ( port < 0 ) {
		rc = port;
		DBGC ( tcp, "TCP %p could not bind: %s\n",
		       tcp, strerror ( rc ) );
		goto err;
	}
	tcp->local_port = port;
	DBGC ( tcp, "TCP %p bound to port %d\n", tcp, tcp->local_port );

	/* Start timer to initiate SYN */
	start_timer_nodelay ( &tcp->timer );

	/* Add a pending operation for the SYN */
	pending_get ( &tcp->pending_flags );

	/* Attach parent interface, transfer reference to connection
	 * list and return
	 */
	intf_plug_plug ( &tcp->xfer, xfer );
	list_add ( &tcp->list, &tcp_conns );
	return 0;

 err:
	/* Dropping the only reference frees the connection */
	ref_put ( &tcp->refcnt );
	return rc;
}
338
/**
 * Close TCP connection
 *
 * @v tcp		TCP connection
 * @v rc		Reason for close
 *
 * Closes the data transfer interface.  If the TCP state machine is in
 * a suitable state, the connection will be deleted.
 */
static void tcp_close ( struct tcp_connection *tcp, int rc ) {
	struct io_buffer *iobuf;
	struct io_buffer *tmp;

	/* Close data transfer interface */
	intf_shutdown ( &tcp->xfer, rc );
	tcp->flags |= TCP_XFER_CLOSED;

	/* If we are in CLOSED, or have otherwise not yet received a
	 * SYN (i.e. we are in LISTEN or SYN_SENT), just delete the
	 * connection.
	 */
	if ( ! ( tcp->tcp_state & TCP_STATE_RCVD ( TCP_SYN ) ) ) {

		/* Transition to CLOSED for the sake of debugging messages */
		tcp->tcp_state = TCP_CLOSED;
		tcp_dump_state ( tcp );

		/* Free any unprocessed I/O buffers */
		list_for_each_entry_safe ( iobuf, tmp, &tcp->rx_queue, list ) {
			list_del ( &iobuf->list );
			free_iob ( iobuf );
		}

		/* Free any unsent I/O buffers, releasing the pending
		 * operation that each queued buffer holds
		 */
		list_for_each_entry_safe ( iobuf, tmp, &tcp->tx_queue, list ) {
			list_del ( &iobuf->list );
			free_iob ( iobuf );
			pending_put ( &tcp->pending_data );
		}
		assert ( ! is_pending ( &tcp->pending_data ) );

		/* Remove pending operations for SYN and FIN, if
		 * applicable (two puts: one each for SYN and FIN)
		 */
		pending_put ( &tcp->pending_flags );
		pending_put ( &tcp->pending_flags );

		/* Remove from list and drop reference */
		process_del ( &tcp->process );
		stop_timer ( &tcp->timer );
		stop_timer ( &tcp->keepalive );
		stop_timer ( &tcp->wait );
		list_del ( &tcp->list );
		ref_put ( &tcp->refcnt );
		DBGC ( tcp, "TCP %p connection deleted\n", tcp );
		return;
	}

	/* If we have not had our SYN acknowledged (i.e. we are in
	 * SYN_RCVD), pretend that it has been acknowledged so that we
	 * can send a FIN without breaking things.
	 */
	if ( ! ( tcp->tcp_state & TCP_STATE_ACKED ( TCP_SYN ) ) )
		tcp_rx_ack ( tcp, ( tcp->snd_seq + 1 ), 0 );

	/* Stop keepalive timer */
	stop_timer ( &tcp->keepalive );

	/* If we have no data remaining to send, start sending FIN */
	if ( list_empty ( &tcp->tx_queue ) &&
	     ! ( tcp->tcp_state & TCP_STATE_SENT ( TCP_FIN ) ) ) {

		tcp->tcp_state |= TCP_STATE_SENT ( TCP_FIN );
		tcp_dump_state ( tcp );
		process_add ( &tcp->process );

		/* Add a pending operation for the FIN */
		pending_get ( &tcp->pending_flags );
	}
}
417
418 /***************************************************************************
419 *
420 * Transmit data path
421 *
422 ***************************************************************************
423 */
424
425 /**
426 * Calculate transmission window
427 *
428 * @v tcp TCP connection
429 * @ret len Maximum length that can be sent in a single packet
430 */
431 static size_t tcp_xmit_win ( struct tcp_connection *tcp ) {
432 size_t len;
433
434 /* Not ready if we're not in a suitable connection state */
435 if ( ! TCP_CAN_SEND_DATA ( tcp->tcp_state ) )
436 return 0;
437
438 /* Length is the minimum of the receiver's window and the path MTU */
439 len = tcp->snd_win;
440 if ( len > TCP_PATH_MTU )
441 len = TCP_PATH_MTU;
442
443 return len;
444 }
445
446 /**
447 * Check data-transfer flow control window
448 *
449 * @v tcp TCP connection
450 * @ret len Length of window
451 */
452 static size_t tcp_xfer_window ( struct tcp_connection *tcp ) {
453
454 /* Not ready if data queue is non-empty. This imposes a limit
455 * of only one unACKed packet in the TX queue at any time; we
456 * do this to conserve memory usage.
457 */
458 if ( ! list_empty ( &tcp->tx_queue ) )
459 return 0;
460
461 /* Return TCP window length */
462 return tcp_xmit_win ( tcp );
463 }
464
/**
 * Find selective acknowledgement block
 *
 * @v tcp		TCP connection
 * @v seq		SEQ value in SACK block (in host-endian order)
 * @v sack		SACK block to fill in (in host-endian order)
 * @ret len		Length of SACK block
 *
 * Scans the receive queue for the contiguous block of queued
 * sequence space containing @c seq, and fills in @c sack with its
 * boundaries.  Returns zero if no queued block contains @c seq.
 *
 * NOTE(review): the scan assumes the receive queue is kept ordered
 * by ascending SEQ — confirm against the enqueue path (not visible
 * in this chunk).
 */
static uint32_t tcp_sack_block ( struct tcp_connection *tcp, uint32_t seq,
				 struct tcp_sack_block *sack ) {
	struct io_buffer *iobuf;
	struct tcp_rx_queued_header *tcpqhdr;
	uint32_t left = tcp->rcv_ack;
	uint32_t right = left;

	/* Find highest block which does not start after SEQ */
	list_for_each_entry ( iobuf, &tcp->rx_queue, list ) {
		tcpqhdr = iobuf->data;
		if ( tcp_cmp ( tcpqhdr->seq, right ) > 0 ) {
			/* Gap before this segment: start a new block,
			 * unless it already lies beyond SEQ
			 */
			if ( tcp_cmp ( tcpqhdr->seq, seq ) > 0 )
				break;
			left = tcpqhdr->seq;
		}
		if ( tcp_cmp ( tcpqhdr->nxt, right ) > 0 )
			right = tcpqhdr->nxt;
	}

	/* Fail if this block does not contain SEQ */
	if ( tcp_cmp ( right, seq ) < 0 )
		return 0;

	/* Populate SACK block */
	sack->left = left;
	sack->right = right;
	return ( right - left );
}
501
/**
 * Update TCP selective acknowledgement list
 *
 * @v tcp		TCP connection
 * @v seq		SEQ value in first SACK block (in host-endian order)
 * @ret count		Number of SACK blocks
 *
 * Rebuilds the connection's SACK list: the block containing @c seq
 * (if any) comes first, followed by refreshed versions of the
 * previously recorded blocks, with duplicates removed.
 */
static unsigned int tcp_sack ( struct tcp_connection *tcp, uint32_t seq ) {
	struct tcp_sack_block sack[TCP_SACK_MAX];
	unsigned int old = 0;
	unsigned int new = 0;
	unsigned int i;
	uint32_t len;

	/* Populate first new SACK block */
	len = tcp_sack_block ( tcp, seq, &sack[0] );
	if ( len )
		new++;

	/* Populate remaining new SACK blocks based on old SACK blocks */
	for ( old = 0 ; old < TCP_SACK_MAX ; old++ ) {

		/* Stop if we run out of space in the new list */
		if ( new == TCP_SACK_MAX )
			break;

		/* Skip empty old SACK blocks */
		if ( tcp->sack[old].left == tcp->sack[old].right )
			continue;

		/* Populate new SACK block from the old block's start */
		len = tcp_sack_block ( tcp, tcp->sack[old].left, &sack[new] );
		if ( len == 0 )
			continue;

		/* Eliminate duplicates: if an earlier new block has
		 * the same left edge, discard this one (the new++
		 * below then rewrites the same slot)
		 */
		for ( i = 0 ; i < new ; i++ ) {
			if ( sack[i].left == sack[new].left ) {
				new--;
				break;
			}
		}
		new++;
	}

	/* Update SACK list */
	memset ( tcp->sack, 0, sizeof ( tcp->sack ) );
	memcpy ( tcp->sack, sack, ( new * sizeof ( tcp->sack[0] ) ) );
	return new;
}
552
/**
 * Process TCP transmit queue
 *
 * @v tcp		TCP connection
 * @v max_len		Maximum length to process
 * @v dest		I/O buffer to fill with data, or NULL
 * @v remove		Remove data from queue
 * @ret len		Length of data processed
 *
 * This processes at most @c max_len bytes from the TCP connection's
 * transmit queue.  Data will be copied into the @c dest I/O buffer
 * (if provided) and, if @c remove is true, removed from the transmit
 * queue.
 */
static size_t tcp_process_tx_queue ( struct tcp_connection *tcp, size_t max_len,
				     struct io_buffer *dest, int remove ) {
	struct io_buffer *iobuf;
	struct io_buffer *tmp;
	size_t frag_len;
	size_t len = 0;

	/* Safe iteration: entries may be deleted while walking */
	list_for_each_entry_safe ( iobuf, tmp, &tcp->tx_queue, list ) {
		/* Once max_len reaches zero, remaining iterations
		 * process zero-length fragments and are harmless
		 */
		frag_len = iob_len ( iobuf );
		if ( frag_len > max_len )
			frag_len = max_len;
		if ( dest ) {
			memcpy ( iob_put ( dest, frag_len ), iobuf->data,
				 frag_len );
		}
		if ( remove ) {
			iob_pull ( iobuf, frag_len );
			if ( ! iob_len ( iobuf ) ) {
				/* Buffer fully consumed: free it and
				 * release its pending operation
				 */
				list_del ( &iobuf->list );
				free_iob ( iobuf );
				pending_put ( &tcp->pending_data );
			}
		}
		len += frag_len;
		max_len -= frag_len;
	}
	return len;
}
595
/**
 * Transmit any outstanding data (with selective acknowledgement)
 *
 * @v tcp		TCP connection
 * @v sack_seq		SEQ for first selective acknowledgement (if any)
 *
 * Transmits any outstanding data on the connection.
 *
 * Note that even if an error is returned, the retransmission timer
 * will have been started if necessary, and so the stack will
 * eventually attempt to retransmit the failed packet.
 */
static void tcp_xmit_sack ( struct tcp_connection *tcp, uint32_t sack_seq ) {
	struct io_buffer *iobuf;
	struct tcp_header *tcphdr;
	struct tcp_mss_option *mssopt;
	struct tcp_window_scale_padded_option *wsopt;
	struct tcp_timestamp_padded_option *tsopt;
	struct tcp_sack_permitted_padded_option *spopt;
	struct tcp_sack_padded_option *sackopt;
	struct tcp_sack_block *sack;
	void *payload;
	unsigned int flags;
	unsigned int sack_count;
	unsigned int i;
	size_t len = 0;
	size_t sack_len;
	uint32_t seq_len;
	uint32_t max_rcv_win;
	uint32_t max_representable_win;
	int rc;

	/* Start profiling */
	profile_start ( &tcp_tx_profiler );

	/* If retransmission timer is already running, do nothing */
	if ( timer_running ( &tcp->timer ) )
		return;

	/* Calculate both the actual (payload) and sequence space
	 * lengths that we wish to transmit.
	 */
	if ( TCP_CAN_SEND_DATA ( tcp->tcp_state ) ) {
		/* Measure only (remove=0); data stays queued until ACKed */
		len = tcp_process_tx_queue ( tcp, tcp_xmit_win ( tcp ),
					     NULL, 0 );
	}
	seq_len = len;
	flags = TCP_FLAGS_SENDING ( tcp->tcp_state );
	if ( flags & ( TCP_SYN | TCP_FIN ) ) {
		/* SYN or FIN consume one byte, and we can never send both */
		assert ( ! ( ( flags & TCP_SYN ) && ( flags & TCP_FIN ) ) );
		seq_len++;
	}
	tcp->snd_sent = seq_len;

	/* If we have nothing to transmit, stop now */
	if ( ( seq_len == 0 ) && ! ( tcp->flags & TCP_ACK_PENDING ) )
		return;

	/* If we are transmitting anything that requires
	 * acknowledgement (i.e. consumes sequence space), start the
	 * retransmission timer.  Do this before attempting to
	 * allocate the I/O buffer, in case allocation itself fails.
	 */
	if ( seq_len )
		start_timer ( &tcp->timer );

	/* Allocate I/O buffer */
	iobuf = alloc_iob ( len + TCP_MAX_HEADER_LEN );
	if ( ! iobuf ) {
		DBGC ( tcp, "TCP %p could not allocate iobuf for %08x..%08x "
		       "%08x\n", tcp, tcp->snd_seq, ( tcp->snd_seq + seq_len ),
		       tcp->rcv_ack );
		return;
	}
	iob_reserve ( iobuf, TCP_MAX_HEADER_LEN );

	/* Fill data payload from transmit queue (copy, do not remove) */
	tcp_process_tx_queue ( tcp, len, iobuf, 0 );

	/* Expand receive window if possible */
	max_rcv_win = xfer_window ( &tcp->xfer );
	if ( max_rcv_win > TCP_MAX_WINDOW_SIZE )
		max_rcv_win = TCP_MAX_WINDOW_SIZE;
	max_representable_win = ( 0xffff << tcp->rcv_win_scale );
	if ( max_rcv_win > max_representable_win )
		max_rcv_win = max_representable_win;
	max_rcv_win &= ~0x03; /* Keep everything dword-aligned */
	if ( tcp->rcv_win < max_rcv_win )
		tcp->rcv_win = max_rcv_win;

	/* Fill up the TCP header.  Options are pushed in front of the
	 * payload in reverse order; payload marks the options' end.
	 */
	payload = iobuf->data;
	if ( flags & TCP_SYN ) {
		/* MSS, window scale and SACK-permitted options are
		 * sent only on the SYN
		 */
		mssopt = iob_push ( iobuf, sizeof ( *mssopt ) );
		mssopt->kind = TCP_OPTION_MSS;
		mssopt->length = sizeof ( *mssopt );
		mssopt->mss = htons ( tcp->mss );
		wsopt = iob_push ( iobuf, sizeof ( *wsopt ) );
		wsopt->nop = TCP_OPTION_NOP;
		wsopt->wsopt.kind = TCP_OPTION_WS;
		wsopt->wsopt.length = sizeof ( wsopt->wsopt );
		wsopt->wsopt.scale = TCP_RX_WINDOW_SCALE;
		spopt = iob_push ( iobuf, sizeof ( *spopt ) );
		memset ( spopt->nop, TCP_OPTION_NOP, sizeof ( spopt->nop ) );
		spopt->spopt.kind = TCP_OPTION_SACK_PERMITTED;
		spopt->spopt.length = sizeof ( spopt->spopt );
	}
	if ( ( flags & TCP_SYN ) || ( tcp->flags & TCP_TS_ENABLED ) ) {
		tsopt = iob_push ( iobuf, sizeof ( *tsopt ) );
		memset ( tsopt->nop, TCP_OPTION_NOP, sizeof ( tsopt->nop ) );
		tsopt->tsopt.kind = TCP_OPTION_TS;
		tsopt->tsopt.length = sizeof ( tsopt->tsopt );
		tsopt->tsopt.tsval = htonl ( currticks() );
		tsopt->tsopt.tsecr = htonl ( tcp->ts_recent );
	}
	if ( ( tcp->flags & TCP_SACK_ENABLED ) &&
	     ( ! list_empty ( &tcp->rx_queue ) ) &&
	     ( ( sack_count = tcp_sack ( tcp, sack_seq ) ) != 0 ) ) {
		sack_len = ( sack_count * sizeof ( *sack ) );
		sackopt = iob_push ( iobuf, ( sizeof ( *sackopt ) + sack_len ));
		memset ( sackopt->nop, TCP_OPTION_NOP, sizeof ( sackopt->nop ));
		sackopt->sackopt.kind = TCP_OPTION_SACK;
		sackopt->sackopt.length =
			( sizeof ( sackopt->sackopt ) + sack_len );
		sack = ( ( ( void * ) sackopt ) + sizeof ( *sackopt ) );
		for ( i = 0 ; i < sack_count ; i++, sack++ ) {
			sack->left = htonl ( tcp->sack[i].left );
			sack->right = htonl ( tcp->sack[i].right );
		}
	}
	if ( len != 0 )
		flags |= TCP_PSH;
	tcphdr = iob_push ( iobuf, sizeof ( *tcphdr ) );
	memset ( tcphdr, 0, sizeof ( *tcphdr ) );
	tcphdr->src = htons ( tcp->local_port );
	tcphdr->dest = tcp->peer.st_port;
	tcphdr->seq = htonl ( tcp->snd_seq );
	tcphdr->ack = htonl ( tcp->rcv_ack );
	/* (bytes/4)<<4 == bytes<<2: header length in 32-bit words,
	 * stored in the top nibble
	 */
	tcphdr->hlen = ( ( payload - iobuf->data ) << 2 );
	tcphdr->flags = flags;
	tcphdr->win = htons ( tcp->rcv_win >> tcp->rcv_win_scale );
	tcphdr->csum = tcpip_chksum ( iobuf->data, iob_len ( iobuf ) );

	/* Dump header */
	DBGC2 ( tcp, "TCP %p TX %d->%d %08x..%08x %08x %4zd",
		tcp, ntohs ( tcphdr->src ), ntohs ( tcphdr->dest ),
		ntohl ( tcphdr->seq ), ( ntohl ( tcphdr->seq ) + seq_len ),
		ntohl ( tcphdr->ack ), len );
	tcp_dump_flags ( tcp, tcphdr->flags );
	DBGC2 ( tcp, "\n" );

	/* Transmit packet */
	if ( ( rc = tcpip_tx ( iobuf, &tcp_protocol, NULL, &tcp->peer, NULL,
			       &tcphdr->csum ) ) != 0 ) {
		DBGC ( tcp, "TCP %p could not transmit %08x..%08x %08x: %s\n",
		       tcp, tcp->snd_seq, ( tcp->snd_seq + tcp->snd_sent ),
		       tcp->rcv_ack, strerror ( rc ) );
		return;
	}

	/* Clear ACK-pending flag */
	tcp->flags &= ~TCP_ACK_PENDING;

	profile_stop ( &tcp_tx_profiler );
}
762
/**
 * Transmit any outstanding data
 *
 * @v tcp		TCP connection
 *
 * Convenience wrapper around tcp_xmit_sack(): seeding the SACK scan
 * with the current acknowledgement point yields no explicit first
 * SACK block.
 */
static void tcp_xmit ( struct tcp_connection *tcp ) {

	/* Transmit without an explicit first SACK */
	tcp_xmit_sack ( tcp, tcp->rcv_ack );
}

/** TCP process descriptor (runs tcp_xmit once per scheduling) */
static struct process_descriptor tcp_process_desc =
	PROC_DESC_ONCE ( struct tcp_connection, process, tcp_xmit );
777
/**
 * Retransmission timer expired
 *
 * @v timer		Retransmission timer
 * @v over		Failure indicator
 *
 * If the timer has finally given up (@c over is true), the
 * connection is terminated with -ETIMEDOUT; otherwise the
 * outstanding data is retransmitted.
 */
static void tcp_expired ( struct retry_timer *timer, int over ) {
	struct tcp_connection *tcp =
		container_of ( timer, struct tcp_connection, timer );

	DBGC ( tcp, "TCP %p timer %s in %s for %08x..%08x %08x\n", tcp,
	       ( over ? "expired" : "fired" ), tcp_state ( tcp->tcp_state ),
	       tcp->snd_seq, ( tcp->snd_seq + tcp->snd_sent ), tcp->rcv_ack );

	/* The retransmission timer should run only in states with
	 * unacknowledged sequence space outstanding
	 */
	assert ( ( tcp->tcp_state == TCP_SYN_SENT ) ||
		 ( tcp->tcp_state == TCP_SYN_RCVD ) ||
		 ( tcp->tcp_state == TCP_ESTABLISHED ) ||
		 ( tcp->tcp_state == TCP_FIN_WAIT_1 ) ||
		 ( tcp->tcp_state == TCP_CLOSE_WAIT ) ||
		 ( tcp->tcp_state == TCP_CLOSING_OR_LAST_ACK ) );

	if ( over ) {
		/* If we have finally timed out and given up,
		 * terminate the connection
		 */
		tcp->tcp_state = TCP_CLOSED;
		tcp_dump_state ( tcp );
		tcp_close ( tcp, -ETIMEDOUT );
	} else {
		/* Otherwise, retransmit the packet */
		tcp_xmit ( tcp );
	}
}
811
812 /**
813 * Keepalive timer expired
814 *
815 * @v timer Keepalive timer
816 * @v over Failure indicator
817 */
818 static void tcp_keepalive_expired ( struct retry_timer *timer,
819 int over __unused ) {
820 struct tcp_connection *tcp =
821 container_of ( timer, struct tcp_connection, keepalive );
822
823 DBGC ( tcp, "TCP %p sending keepalive\n", tcp );
824
825 /* Reset keepalive timer */
826 start_timer_fixed ( &tcp->keepalive, TCP_KEEPALIVE_DELAY );
827
828 /* Send keepalive. We do this only to preserve or restore
829 * state in intermediate devices (e.g. firewall NAT tables);
830 * we don't actually care about eliciting a response to verify
831 * that the peer is still alive. We therefore send just a
832 * pure ACK, to keep our transmit path simple.
833 */
834 tcp->flags |= TCP_ACK_PENDING;
835 tcp_xmit ( tcp );
836 }
837
838 /**
839 * Shutdown timer expired
840 *
841 * @v timer Shutdown timer
842 * @v over Failure indicator
843 */
844 static void tcp_wait_expired ( struct retry_timer *timer, int over __unused ) {
845 struct tcp_connection *tcp =
846 container_of ( timer, struct tcp_connection, wait );
847
848 assert ( tcp->tcp_state == TCP_TIME_WAIT );
849
850 DBGC ( tcp, "TCP %p wait complete in %s for %08x..%08x %08x\n", tcp,
851 tcp_state ( tcp->tcp_state ), tcp->snd_seq,
852 ( tcp->snd_seq + tcp->snd_sent ), tcp->rcv_ack );
853
854 tcp->tcp_state = TCP_CLOSED;
855 tcp_dump_state ( tcp );
856 tcp_close ( tcp, 0 );
857 }
858
/**
 * Send RST response to incoming packet
 *
 * @v tcp		TCP connection (may be NULL; used only for debugging)
 * @v st_dest		Destination socket address
 * @v in_tcphdr		TCP header of incoming packet
 * @ret rc		Return status code
 *
 * Constructs a dataless RST|ACK segment mirroring the incoming
 * packet's sequence numbers (SEQ from its ACK, ACK from its SEQ) and
 * transmits it back to the sender.
 */
static int tcp_xmit_reset ( struct tcp_connection *tcp,
			    struct sockaddr_tcpip *st_dest,
			    struct tcp_header *in_tcphdr ) {
	struct io_buffer *iobuf;
	struct tcp_header *tcphdr;
	int rc;

	/* Allocate space for dataless TX buffer */
	iobuf = alloc_iob ( TCP_MAX_HEADER_LEN );
	if ( ! iobuf ) {
		DBGC ( tcp, "TCP %p could not allocate iobuf for RST "
		       "%08x..%08x %08x\n", tcp, ntohl ( in_tcphdr->ack ),
		       ntohl ( in_tcphdr->ack ), ntohl ( in_tcphdr->seq ) );
		return -ENOMEM;
	}
	iob_reserve ( iobuf, TCP_MAX_HEADER_LEN );

	/* Construct RST response: swap ports, mirror SEQ/ACK */
	tcphdr = iob_push ( iobuf, sizeof ( *tcphdr ) );
	memset ( tcphdr, 0, sizeof ( *tcphdr ) );
	tcphdr->src = in_tcphdr->dest;
	tcphdr->dest = in_tcphdr->src;
	tcphdr->seq = in_tcphdr->ack;
	tcphdr->ack = in_tcphdr->seq;
	/* Header length in 32-bit words, stored in the top nibble */
	tcphdr->hlen = ( ( sizeof ( *tcphdr ) / 4 ) << 4 );
	tcphdr->flags = ( TCP_RST | TCP_ACK );
	tcphdr->win = htons ( 0 );
	tcphdr->csum = tcpip_chksum ( iobuf->data, iob_len ( iobuf ) );

	/* Dump header */
	DBGC2 ( tcp, "TCP %p TX %d->%d %08x..%08x %08x %4d",
		tcp, ntohs ( tcphdr->src ), ntohs ( tcphdr->dest ),
		ntohl ( tcphdr->seq ), ( ntohl ( tcphdr->seq ) ),
		ntohl ( tcphdr->ack ), 0 );
	tcp_dump_flags ( tcp, tcphdr->flags );
	DBGC2 ( tcp, "\n" );

	/* Transmit packet */
	if ( ( rc = tcpip_tx ( iobuf, &tcp_protocol, NULL, st_dest,
			       NULL, &tcphdr->csum ) ) != 0 ) {
		DBGC ( tcp, "TCP %p could not transmit RST %08x..%08x %08x: "
		       "%s\n", tcp, ntohl ( in_tcphdr->ack ),
		       ntohl ( in_tcphdr->ack ), ntohl ( in_tcphdr->seq ),
		       strerror ( rc ) );
		return rc;
	}

	return 0;
}
914
915 /***************************************************************************
916 *
917 * Receive data path
918 *
919 ***************************************************************************
920 */
921
922 /**
923 * Identify TCP connection by local port number
924 *
925 * @v local_port Local port
926 * @ret tcp TCP connection, or NULL
927 */
928 static struct tcp_connection * tcp_demux ( unsigned int local_port ) {
929 struct tcp_connection *tcp;
930
931 list_for_each_entry ( tcp, &tcp_conns, list ) {
932 if ( tcp->local_port == local_port )
933 return tcp;
934 }
935 return NULL;
936 }
937
/**
 * Parse TCP received options
 *
 * @v tcp		TCP connection (may be NULL)
 * @v tcphdr		TCP header
 * @v hlen		TCP header length
 * @v options		Options structure to fill in
 * @ret rc		Return status code
 *
 * Walks the options area between the fixed header and @c hlen,
 * recording pointers to the window-scale, SACK-permitted and
 * timestamp options.  Malformed (truncated, underlength or
 * overlength) options cause -EINVAL.
 */
static int tcp_rx_opts ( struct tcp_connection *tcp,
			 const struct tcp_header *tcphdr, size_t hlen,
			 struct tcp_options *options ) {
	/* Arithmetic on void* (GCC extension) is used throughout */
	const void *data = ( ( ( void * ) tcphdr ) + sizeof ( *tcphdr ) );
	const void *end = ( ( ( void * ) tcphdr ) + hlen );
	const struct tcp_option *option;
	unsigned int kind;
	size_t remaining;
	size_t min;

	/* Sanity check */
	assert ( hlen >= sizeof ( *tcphdr ) );

	/* Parse options */
	memset ( options, 0, sizeof ( *options ) );
	while ( ( remaining = ( end - data ) ) ) {

		/* Extract option code */
		option = data;
		kind = option->kind;

		/* Handle single-byte options */
		if ( kind == TCP_OPTION_END )
			break;
		if ( kind == TCP_OPTION_NOP ) {
			data++;
			continue;
		}

		/* Handle multi-byte options.  min is the smallest
		 * valid encoding for this option kind; the length
		 * byte is validated against it below before use.
		 */
		min = sizeof ( *option );
		switch ( kind ) {
		case TCP_OPTION_MSS:
			/* Ignore received MSS */
			break;
		case TCP_OPTION_WS:
			options->wsopt = data;
			min = sizeof ( *options->wsopt );
			break;
		case TCP_OPTION_SACK_PERMITTED:
			options->spopt = data;
			min = sizeof ( *options->spopt );
			break;
		case TCP_OPTION_SACK:
			/* Ignore received SACKs */
			break;
		case TCP_OPTION_TS:
			options->tsopt = data;
			min = sizeof ( *options->tsopt );
			break;
		default:
			DBGC ( tcp, "TCP %p received unknown option %d\n",
			       tcp, kind );
			break;
		}
		if ( remaining < min ) {
			DBGC ( tcp, "TCP %p received truncated option %d\n",
			       tcp, kind );
			return -EINVAL;
		}
		if ( option->length < min ) {
			DBGC ( tcp, "TCP %p received underlength option %d\n",
			       tcp, kind );
			return -EINVAL;
		}
		if ( option->length > remaining ) {
			DBGC ( tcp, "TCP %p received overlength option %d\n",
			       tcp, kind );
			return -EINVAL;
		}
		data += option->length;
	}

	return 0;
}
1022
1023 /**
1024 * Consume received sequence space
1025 *
1026 * @v tcp TCP connection
1027 * @v seq_len Sequence space length to consume
1028 */
1029 static void tcp_rx_seq ( struct tcp_connection *tcp, uint32_t seq_len ) {
1030 unsigned int sack;
1031
1032 /* Sanity check */
1033 assert ( seq_len > 0 );
1034
1035 /* Update acknowledgement number */
1036 tcp->rcv_ack += seq_len;
1037
1038 /* Update window */
1039 if ( tcp->rcv_win > seq_len ) {
1040 tcp->rcv_win -= seq_len;
1041 } else {
1042 tcp->rcv_win = 0;
1043 }
1044
1045 /* Update timestamp */
1046 tcp->ts_recent = tcp->ts_val;
1047
1048 /* Update SACK list */
1049 for ( sack = 0 ; sack < TCP_SACK_MAX ; sack++ ) {
1050 if ( tcp->sack[sack].left == tcp->sack[sack].right )
1051 continue;
1052 if ( tcp_cmp ( tcp->sack[sack].left, tcp->rcv_ack ) < 0 )
1053 tcp->sack[sack].left = tcp->rcv_ack;
1054 if ( tcp_cmp ( tcp->sack[sack].right, tcp->rcv_ack ) < 0 )
1055 tcp->sack[sack].right = tcp->rcv_ack;
1056 }
1057
1058 /* Mark ACK as pending */
1059 tcp->flags |= TCP_ACK_PENDING;
1060 }
1061
/**
 * Handle TCP received SYN
 *
 * @v tcp		TCP connection
 * @v seq		SEQ value (in host-endian order)
 * @v options		TCP options
 * @ret rc		Return status code
 *
 * On the first SYN, synchronises the receive sequence number and
 * latches the negotiated features (timestamps, SACK, window
 * scaling).  Duplicate SYNs with a different SEQ are ignored.
 */
static int tcp_rx_syn ( struct tcp_connection *tcp, uint32_t seq,
			struct tcp_options *options ) {

	/* Synchronise sequence numbers on first SYN */
	if ( ! ( tcp->tcp_state & TCP_STATE_RCVD ( TCP_SYN ) ) ) {
		tcp->rcv_ack = seq;
		if ( options->tsopt )
			tcp->flags |= TCP_TS_ENABLED;
		if ( options->spopt )
			tcp->flags |= TCP_SACK_ENABLED;
		if ( options->wsopt ) {
			/* Window scaling applies only if both sides
			 * sent the option
			 */
			tcp->snd_win_scale = options->wsopt->scale;
			tcp->rcv_win_scale = TCP_RX_WINDOW_SCALE;
		}
		DBGC ( tcp, "TCP %p using %stimestamps, %sSACK, TX window "
		       "x%d, RX window x%d\n", tcp,
		       ( ( tcp->flags & TCP_TS_ENABLED ) ? "" : "no " ),
		       ( ( tcp->flags & TCP_SACK_ENABLED ) ? "" : "no " ),
		       ( 1 << tcp->snd_win_scale ),
		       ( 1 << tcp->rcv_win_scale ) );
	}

	/* Ignore duplicate SYN */
	if ( seq != tcp->rcv_ack )
		return 0;

	/* Acknowledge SYN (consumes one unit of sequence space) */
	tcp_rx_seq ( tcp, 1 );

	/* Mark SYN as received and start sending ACKs with each packet */
	tcp->tcp_state |= ( TCP_STATE_SENT ( TCP_ACK ) |
			    TCP_STATE_RCVD ( TCP_SYN ) );

	return 0;
}
1105
/**
 * Handle TCP received ACK
 *
 * @v tcp		TCP connection
 * @v ack		ACK value (in host-endian order)
 * @v win		WIN value (in host-endian order)
 * @ret rc		Return status code
 *
 * A non-zero return causes the caller (tcp_rx()) to reply with a RST.
 */
static int tcp_rx_ack ( struct tcp_connection *tcp, uint32_t ack,
			uint32_t win ) {
	/* Newly acknowledged sequence length (modulo-2^32 arithmetic) */
	uint32_t ack_len = ( ack - tcp->snd_seq );
	size_t len;
	unsigned int acked_flags;

	/* Check for out-of-range or old duplicate ACKs */
	if ( ack_len > tcp->snd_sent ) {
		DBGC ( tcp, "TCP %p received ACK for %08x..%08x, "
		       "sent only %08x..%08x\n", tcp, tcp->snd_seq,
		       ( tcp->snd_seq + ack_len ), tcp->snd_seq,
		       ( tcp->snd_seq + tcp->snd_sent ) );

		if ( TCP_HAS_BEEN_ESTABLISHED ( tcp->tcp_state ) ) {
			/* Just ignore what might be old duplicate ACKs */
			return 0;
		} else {
			/* Send RST if an out-of-range ACK is received
			 * on a not-yet-established connection, as per
			 * RFC 793.
			 */
			return -EINVAL;
		}
	}

	/* Update window size */
	tcp->snd_win = win;

	/* Hold off (or start) the keepalive timer, if applicable */
	if ( ! ( tcp->tcp_state & TCP_STATE_SENT ( TCP_FIN ) ) )
		start_timer_fixed ( &tcp->keepalive, TCP_KEEPALIVE_DELAY );

	/* Ignore ACKs that don't actually acknowledge any new data.
	 * (In particular, do not stop the retransmission timer; this
	 * avoids creating a sorcerer's apprentice syndrome when a
	 * duplicate ACK is received and we still have data in our
	 * transmit queue.)
	 */
	if ( ack_len == 0 )
		return 0;

	/* Stop the retransmission timer */
	stop_timer ( &tcp->timer );

	/* Determine acknowledged flags and data length.  A SYN or
	 * FIN currently being sent occupies one unit of the
	 * acknowledged sequence space and so does not count as data.
	 */
	len = ack_len;
	acked_flags = ( TCP_FLAGS_SENDING ( tcp->tcp_state ) &
			( TCP_SYN | TCP_FIN ) );
	if ( acked_flags ) {
		len--;
		/* The SYN/FIN is no longer a pending operation */
		pending_put ( &tcp->pending_flags );
	}

	/* Update SEQ and sent counters */
	tcp->snd_seq = ack;
	tcp->snd_sent = 0;

	/* Remove any acknowledged data from transmit queue */
	tcp_process_tx_queue ( tcp, len, NULL, 1 );

	/* Mark SYN/FIN as acknowledged if applicable. */
	if ( acked_flags )
		tcp->tcp_state |= TCP_STATE_ACKED ( acked_flags );

	/* Start sending FIN if we've had all possible data ACKed */
	if ( list_empty ( &tcp->tx_queue ) &&
	     ( tcp->flags & TCP_XFER_CLOSED ) &&
	     ! ( tcp->tcp_state & TCP_STATE_SENT ( TCP_FIN ) ) ) {
		tcp->tcp_state |= TCP_STATE_SENT ( TCP_FIN );
		pending_get ( &tcp->pending_flags );
	}

	return 0;
}
1188
1189 /**
1190 * Handle TCP received data
1191 *
1192 * @v tcp TCP connection
1193 * @v seq SEQ value (in host-endian order)
1194 * @v iobuf I/O buffer
1195 * @ret rc Return status code
1196 *
1197 * This function takes ownership of the I/O buffer.
1198 */
1199 static int tcp_rx_data ( struct tcp_connection *tcp, uint32_t seq,
1200 struct io_buffer *iobuf ) {
1201 uint32_t already_rcvd;
1202 uint32_t len;
1203 int rc;
1204
1205 /* Ignore duplicate or out-of-order data */
1206 already_rcvd = ( tcp->rcv_ack - seq );
1207 len = iob_len ( iobuf );
1208 if ( already_rcvd >= len ) {
1209 free_iob ( iobuf );
1210 return 0;
1211 }
1212 iob_pull ( iobuf, already_rcvd );
1213 len -= already_rcvd;
1214
1215 /* Acknowledge new data */
1216 tcp_rx_seq ( tcp, len );
1217
1218 /* Deliver data to application */
1219 profile_start ( &tcp_xfer_profiler );
1220 if ( ( rc = xfer_deliver_iob ( &tcp->xfer, iobuf ) ) != 0 ) {
1221 DBGC ( tcp, "TCP %p could not deliver %08x..%08x: %s\n",
1222 tcp, seq, ( seq + len ), strerror ( rc ) );
1223 return rc;
1224 }
1225 profile_stop ( &tcp_xfer_profiler );
1226
1227 return 0;
1228 }
1229
1230 /**
1231 * Handle TCP received FIN
1232 *
1233 * @v tcp TCP connection
1234 * @v seq SEQ value (in host-endian order)
1235 * @ret rc Return status code
1236 */
1237 static int tcp_rx_fin ( struct tcp_connection *tcp, uint32_t seq ) {
1238
1239 /* Ignore duplicate or out-of-order FIN */
1240 if ( seq != tcp->rcv_ack )
1241 return 0;
1242
1243 /* Acknowledge FIN */
1244 tcp_rx_seq ( tcp, 1 );
1245
1246 /* Mark FIN as received */
1247 tcp->tcp_state |= TCP_STATE_RCVD ( TCP_FIN );
1248
1249 /* Close connection */
1250 tcp_close ( tcp, 0 );
1251
1252 return 0;
1253 }
1254
1255 /**
1256 * Handle TCP received RST
1257 *
1258 * @v tcp TCP connection
1259 * @v seq SEQ value (in host-endian order)
1260 * @ret rc Return status code
1261 */
1262 static int tcp_rx_rst ( struct tcp_connection *tcp, uint32_t seq ) {
1263
1264 /* Accept RST only if it falls within the window. If we have
1265 * not yet received a SYN, then we have no window to test
1266 * against, so fall back to checking that our SYN has been
1267 * ACKed.
1268 */
1269 if ( tcp->tcp_state & TCP_STATE_RCVD ( TCP_SYN ) ) {
1270 if ( ! tcp_in_window ( seq, tcp->rcv_ack, tcp->rcv_win ) )
1271 return 0;
1272 } else {
1273 if ( ! ( tcp->tcp_state & TCP_STATE_ACKED ( TCP_SYN ) ) )
1274 return 0;
1275 }
1276
1277 /* Abort connection */
1278 tcp->tcp_state = TCP_CLOSED;
1279 tcp_dump_state ( tcp );
1280 tcp_close ( tcp, -ECONNRESET );
1281
1282 DBGC ( tcp, "TCP %p connection reset by peer\n", tcp );
1283 return -ECONNRESET;
1284 }
1285
/**
 * Enqueue received TCP packet
 *
 * @v tcp		TCP connection
 * @v seq		SEQ value (in host-endian order)
 * @v flags		TCP flags
 * @v iobuf		I/O buffer
 *
 * This function takes ownership of the I/O buffer: it is either
 * added to the receive queue or freed immediately.
 */
static void tcp_rx_enqueue ( struct tcp_connection *tcp, uint32_t seq,
			     uint8_t flags, struct io_buffer *iobuf ) {
	struct tcp_rx_queued_header *tcpqhdr;
	struct io_buffer *queued;
	size_t len;
	uint32_t seq_len;
	uint32_t nxt;

	/* Calculate remaining flags and sequence length.  Note that
	 * SYN, if present, has already been processed by this point.
	 * A FIN occupies one unit of sequence space.
	 */
	flags &= TCP_FIN;
	len = iob_len ( iobuf );
	seq_len = ( len + ( flags ? 1 : 0 ) );
	nxt = ( seq + seq_len );

	/* Discard immediately (to save memory) if:
	 *
	 * a) we have not yet received a SYN (and so have no defined
	 *    receive window), or
	 * b) the packet lies entirely outside the receive window, or
	 * c) there is no further content to process.
	 */
	if ( ( ! ( tcp->tcp_state & TCP_STATE_RCVD ( TCP_SYN ) ) ) ||
	     ( tcp_cmp ( seq, tcp->rcv_ack + tcp->rcv_win ) >= 0 ) ||
	     ( tcp_cmp ( nxt, tcp->rcv_ack ) < 0 ) ||
	     ( seq_len == 0 ) ) {
		free_iob ( iobuf );
		return;
	}

	/* Prepend internal bookkeeping header (stripped again by
	 * tcp_process_rx_queue()).
	 */
	tcpqhdr = iob_push ( iobuf, sizeof ( *tcpqhdr ) );
	tcpqhdr->seq = seq;
	tcpqhdr->nxt = nxt;
	tcpqhdr->flags = flags;

	/* Add to RX queue, keeping the queue sorted by SEQ: find the
	 * first queued packet with a later starting sequence number
	 * and insert before it (list_add_tail() inserts immediately
	 * before the cursor; if the loop completes, the cursor
	 * denotes the list head and the buffer is appended).
	 */
	list_for_each_entry ( queued, &tcp->rx_queue, list ) {
		tcpqhdr = queued->data;
		if ( tcp_cmp ( seq, tcpqhdr->seq ) < 0 )
			break;
	}
	list_add_tail ( &iobuf->list, &queued->list );
}
1339
/**
 * Process receive queue
 *
 * @v tcp		TCP connection
 *
 * Delivers any contiguous run of queued data (and FIN) starting at
 * the current acknowledgement point, stopping at the first sequence
 * gap.
 */
static void tcp_process_rx_queue ( struct tcp_connection *tcp ) {
	struct io_buffer *iobuf;
	struct tcp_rx_queued_header *tcpqhdr;
	uint32_t seq;
	unsigned int flags;
	size_t len;

	/* Process all applicable received buffers.  Note that we
	 * cannot use list_for_each_entry() to iterate over the RX
	 * queue, since tcp_discard() may remove packets from the RX
	 * queue while we are processing.
	 */
	while ( ( iobuf = list_first_entry ( &tcp->rx_queue, struct io_buffer,
					     list ) ) ) {

		/* Stop processing when we hit the first gap */
		tcpqhdr = iobuf->data;
		if ( tcp_cmp ( tcpqhdr->seq, tcp->rcv_ack ) > 0 )
			break;

		/* Strip internal header and remove from RX queue */
		list_del ( &iobuf->list );
		seq = tcpqhdr->seq;
		flags = tcpqhdr->flags;
		iob_pull ( iobuf, sizeof ( *tcpqhdr ) );
		len = iob_len ( iobuf );

		/* Handle new data, if any; ownership of the buffer
		 * passes to tcp_rx_data().
		 */
		tcp_rx_data ( tcp, seq, iob_disown ( iobuf ) );
		seq += len;

		/* Handle FIN, if present; it occupies the sequence
		 * space immediately after the data.
		 */
		if ( flags & TCP_FIN ) {
			tcp_rx_fin ( tcp, seq );
			seq++;
		}
	}
}
1383
/**
 * Process received packet
 *
 * @v iobuf		I/O buffer
 * @v netdev		Network device
 * @v st_src		Partially-filled source address
 * @v st_dest		Partially-filled destination address
 * @v pshdr_csum	Pseudo-header checksum
 * @ret rc		Return status code
 *
 * This function takes ownership of the I/O buffer: it is either
 * handed on to tcp_rx_enqueue() or freed on the discard path.
 */
static int tcp_rx ( struct io_buffer *iobuf,
		    struct net_device *netdev __unused,
		    struct sockaddr_tcpip *st_src,
		    struct sockaddr_tcpip *st_dest __unused,
		    uint16_t pshdr_csum ) {
	struct tcp_header *tcphdr = iobuf->data;
	struct tcp_connection *tcp;
	struct tcp_options options;
	size_t hlen;
	uint16_t csum;
	uint32_t seq;
	uint32_t ack;
	uint16_t raw_win;
	uint32_t win;
	unsigned int flags;
	size_t len;
	uint32_t seq_len;
	size_t old_xfer_window;
	int rc;

	/* Start profiling */
	profile_start ( &tcp_rx_profiler );

	/* Sanity check packet */
	if ( iob_len ( iobuf ) < sizeof ( *tcphdr ) ) {
		DBG ( "TCP packet too short at %zd bytes (min %zd bytes)\n",
		      iob_len ( iobuf ), sizeof ( *tcphdr ) );
		rc = -EINVAL;
		goto discard;
	}
	/* Header length is carried in the upper nibble of the hlen
	 * byte, in units of 32-bit words.
	 */
	hlen = ( ( tcphdr->hlen & TCP_MASK_HLEN ) / 16 ) * 4;
	if ( hlen < sizeof ( *tcphdr ) ) {
		DBG ( "TCP header too short at %zd bytes (min %zd bytes)\n",
		      hlen, sizeof ( *tcphdr ) );
		rc = -EINVAL;
		goto discard;
	}
	if ( hlen > iob_len ( iobuf ) ) {
		DBG ( "TCP header too long at %zd bytes (max %zd bytes)\n",
		      hlen, iob_len ( iobuf ) );
		rc = -EINVAL;
		goto discard;
	}
	/* Continuing the checksum from the pseudo-header over the
	 * whole segment must yield zero.
	 */
	csum = tcpip_continue_chksum ( pshdr_csum, iobuf->data,
				       iob_len ( iobuf ) );
	if ( csum != 0 ) {
		DBG ( "TCP checksum incorrect (is %04x including checksum "
		      "field, should be 0000)\n", csum );
		rc = -EINVAL;
		goto discard;
	}

	/* Parse parameters from header and strip header.  Note that
	 * tcp may be NULL from here until the explicit check below;
	 * it is used only as a debug identifier in the interim.
	 */
	tcp = tcp_demux ( ntohs ( tcphdr->dest ) );
	seq = ntohl ( tcphdr->seq );
	ack = ntohl ( tcphdr->ack );
	raw_win = ntohs ( tcphdr->win );
	flags = tcphdr->flags;
	if ( ( rc = tcp_rx_opts ( tcp, tcphdr, hlen, &options ) ) != 0 )
		goto discard;
	if ( tcp && options.tsopt )
		tcp->ts_val = ntohl ( options.tsopt->tsval );
	iob_pull ( iobuf, hlen );
	len = iob_len ( iobuf );
	/* SYN and FIN each occupy one unit of sequence space */
	seq_len = ( len + ( ( flags & TCP_SYN ) ? 1 : 0 ) +
		    ( ( flags & TCP_FIN ) ? 1 : 0 ) );

	/* Dump header */
	DBGC2 ( tcp, "TCP %p RX %d<-%d %08x %08x..%08x %4zd",
		tcp, ntohs ( tcphdr->dest ), ntohs ( tcphdr->src ),
		ntohl ( tcphdr->ack ), ntohl ( tcphdr->seq ),
		( ntohl ( tcphdr->seq ) + seq_len ), len );
	tcp_dump_flags ( tcp, tcphdr->flags );
	DBGC2 ( tcp, "\n" );

	/* If no connection was found, silently drop packet */
	if ( ! tcp ) {
		rc = -ENOTCONN;
		goto discard;
	}

	/* Record old data-transfer window (to detect changes below) */
	old_xfer_window = tcp_xfer_window ( tcp );

	/* Handle ACK, if present */
	if ( flags & TCP_ACK ) {
		/* Apply the peer's negotiated window scale factor */
		win = ( raw_win << tcp->snd_win_scale );
		if ( ( rc = tcp_rx_ack ( tcp, ack, win ) ) != 0 ) {
			/* Out-of-range ACK on an unestablished
			 * connection: reply with RST.
			 */
			tcp_xmit_reset ( tcp, st_src, tcphdr );
			goto discard;
		}
	}

	/* Force an ACK if this packet is out of order */
	if ( ( tcp->tcp_state & TCP_STATE_RCVD ( TCP_SYN ) ) &&
	     ( seq != tcp->rcv_ack ) ) {
		tcp->flags |= TCP_ACK_PENDING;
	}

	/* Handle SYN, if present */
	if ( flags & TCP_SYN ) {
		tcp_rx_syn ( tcp, seq, &options );
		seq++;
	}

	/* Handle RST, if present */
	if ( flags & TCP_RST ) {
		if ( ( rc = tcp_rx_rst ( tcp, seq ) ) != 0 )
			goto discard;
	}

	/* Enqueue received data; ownership of the buffer passes to
	 * tcp_rx_enqueue().
	 */
	tcp_rx_enqueue ( tcp, seq, flags, iob_disown ( iobuf ) );

	/* Process receive queue */
	tcp_process_rx_queue ( tcp );

	/* Dump out any state change as a result of the received packet */
	tcp_dump_state ( tcp );

	/* Schedule transmission of ACK (and any pending data).  If we
	 * have received any out-of-order packets (i.e. if the receive
	 * queue remains non-empty after processing) then send the ACK
	 * immediately in order to trigger Fast Retransmission.
	 */
	if ( list_empty ( &tcp->rx_queue ) ) {
		process_add ( &tcp->process );
	} else {
		tcp_xmit_sack ( tcp, seq );
	}

	/* If this packet was the last we expect to receive, set up
	 * timer to expire and cause the connection to be freed.
	 */
	if ( TCP_CLOSED_GRACEFULLY ( tcp->tcp_state ) ) {
		stop_timer ( &tcp->wait );
		start_timer_fixed ( &tcp->wait, ( 2 * TCP_MSL ) );
	}

	/* Notify application if window has changed */
	if ( tcp_xfer_window ( tcp ) != old_xfer_window )
		xfer_window_changed ( &tcp->xfer );

	profile_stop ( &tcp_rx_profiler );
	return 0;

 discard:
	/* Free received packet */
	free_iob ( iobuf );
	return rc;
}
1545
/** TCP protocol
 *
 * Registers tcp_rx() as the receive handler for IP protocol number
 * IP_TCP.
 */
struct tcpip_protocol tcp_protocol __tcpip_protocol = {
	.name = "TCP",
	.rx = tcp_rx,
	.tcpip_proto = IP_TCP,
};
1552
1553 /**
1554 * Discard some cached TCP data
1555 *
1556 * @ret discarded Number of cached items discarded
1557 */
1558 static unsigned int tcp_discard ( void ) {
1559 struct tcp_connection *tcp;
1560 struct io_buffer *iobuf;
1561 unsigned int discarded = 0;
1562
1563 /* Try to drop one queued RX packet from each connection */
1564 list_for_each_entry ( tcp, &tcp_conns, list ) {
1565 list_for_each_entry_reverse ( iobuf, &tcp->rx_queue, list ) {
1566
1567 /* Remove packet from queue */
1568 list_del ( &iobuf->list );
1569 free_iob ( iobuf );
1570
1571 /* Report discard */
1572 discarded++;
1573 break;
1574 }
1575 }
1576
1577 return discarded;
1578 }
1579
/** TCP cache discarder
 *
 * Registers tcp_discard() as a cache discarder at CACHE_NORMAL
 * priority, allowing queued received packets to be reclaimed.
 */
struct cache_discarder tcp_discarder __cache_discarder ( CACHE_NORMAL ) = {
	.discard = tcp_discard,
};
1584
1585 /**
1586 * Find first TCP connection that has not yet been closed
1587 *
1588 * @ret tcp First unclosed connection, or NULL
1589 */
1590 static struct tcp_connection * tcp_first_unclosed ( void ) {
1591 struct tcp_connection *tcp;
1592
1593 /* Find first connection which has not yet been closed */
1594 list_for_each_entry ( tcp, &tcp_conns, list ) {
1595 if ( ! ( tcp->flags & TCP_XFER_CLOSED ) )
1596 return tcp;
1597 }
1598 return NULL;
1599 }
1600
1601 /**
1602 * Find first TCP connection that has not yet finished all operations
1603 *
1604 * @ret tcp First unfinished connection, or NULL
1605 */
1606 static struct tcp_connection * tcp_first_unfinished ( void ) {
1607 struct tcp_connection *tcp;
1608
1609 /* Find first connection which has not yet closed gracefully,
1610 * or which still has a pending transmission (e.g. to ACK the
1611 * received FIN).
1612 */
1613 list_for_each_entry ( tcp, &tcp_conns, list ) {
1614 if ( ( ! TCP_CLOSED_GRACEFULLY ( tcp->tcp_state ) ) ||
1615 process_running ( &tcp->process ) ) {
1616 return tcp;
1617 }
1618 }
1619 return NULL;
1620 }
1621
1622 /**
1623 * Shut down all TCP connections
1624 *
1625 */
1626 static void tcp_shutdown ( int booting __unused ) {
1627 struct tcp_connection *tcp;
1628 unsigned long start;
1629 unsigned long elapsed;
1630
1631 /* Initiate a graceful close of all connections, allowing for
1632 * the fact that the connection list may change as we do so.
1633 */
1634 while ( ( tcp = tcp_first_unclosed() ) ) {
1635 DBGC ( tcp, "TCP %p closing for shutdown\n", tcp );
1636 tcp_close ( tcp, -ECANCELED );
1637 }
1638
1639 /* Wait for all connections to finish closing gracefully */
1640 start = currticks();
1641 while ( ( tcp = tcp_first_unfinished() ) &&
1642 ( ( elapsed = ( currticks() - start ) ) < TCP_FINISH_TIMEOUT )){
1643 step();
1644 }
1645
1646 /* Forcibly close any remaining connections */
1647 while ( ( tcp = list_first_entry ( &tcp_conns, struct tcp_connection,
1648 list ) ) != NULL ) {
1649 tcp->tcp_state = TCP_CLOSED;
1650 tcp_dump_state ( tcp );
1651 tcp_close ( tcp, -ECANCELED );
1652 }
1653 }
1654
/** TCP shutdown function
 *
 * Runs tcp_shutdown() at STARTUP_LATE priority; no startup method is
 * required.
 */
struct startup_fn tcp_startup_fn __startup_fn ( STARTUP_LATE ) = {
	.name = "tcp",
	.shutdown = tcp_shutdown,
};
1660
1661 /***************************************************************************
1662 *
1663 * Data transfer interface
1664 *
1665 ***************************************************************************
1666 */
1667
/**
 * Close interface
 *
 * @v tcp		TCP connection
 * @v rc		Reason for close
 */
static void tcp_xfer_close ( struct tcp_connection *tcp, int rc ) {

	/* Terminate the connection itself */
	tcp_close ( tcp, rc );

	/* Attempt to get a FIN onto the wire immediately */
	tcp_xmit ( tcp );
}
1682
1683 /**
1684 * Deliver datagram as I/O buffer
1685 *
1686 * @v tcp TCP connection
1687 * @v iobuf Datagram I/O buffer
1688 * @v meta Data transfer metadata
1689 * @ret rc Return status code
1690 */
1691 static int tcp_xfer_deliver ( struct tcp_connection *tcp,
1692 struct io_buffer *iobuf,
1693 struct xfer_metadata *meta __unused ) {
1694
1695 /* Enqueue packet */
1696 list_add_tail ( &iobuf->list, &tcp->tx_queue );
1697
1698 /* Each enqueued packet is a pending operation */
1699 pending_get ( &tcp->pending_data );
1700
1701 /* Transmit data, if possible */
1702 tcp_xmit ( tcp );
1703
1704 return 0;
1705 }
1706
/** TCP data transfer interface operations
 *
 * Maps data-transfer calls from the application onto the TCP
 * connection (deliver, window query, close).
 */
static struct interface_operation tcp_xfer_operations[] = {
	INTF_OP ( xfer_deliver, struct tcp_connection *, tcp_xfer_deliver ),
	INTF_OP ( xfer_window, struct tcp_connection *, tcp_xfer_window ),
	INTF_OP ( intf_close, struct tcp_connection *, tcp_xfer_close ),
};

/** TCP data transfer interface descriptor
 *
 * Binds the operations above to the xfer interface embedded in
 * struct tcp_connection.
 */
static struct interface_descriptor tcp_xfer_desc =
	INTF_DESC ( struct tcp_connection, xfer, tcp_xfer_operations );
1717
1718 /***************************************************************************
1719 *
1720 * Openers
1721 *
1722 ***************************************************************************
1723 */
1724
/** TCP IPv4 socket opener */
struct socket_opener tcp_ipv4_socket_opener __socket_opener = {
	.semantics = TCP_SOCK_STREAM,
	.family = AF_INET,
	.open = tcp_open,
};

/** TCP IPv6 socket opener */
struct socket_opener tcp_ipv6_socket_opener __socket_opener = {
	.semantics = TCP_SOCK_STREAM,
	.family = AF_INET6,
	.open = tcp_open,
};

/** Linkage hack
 *
 * NOTE(review): presumably referenced from elsewhere purely to force
 * this object file to be linked in — confirm against callers.
 */
int tcp_sock_stream = TCP_SOCK_STREAM;
1741
1742 /**
1743 * Open TCP URI
1744 *
1745 * @v xfer Data transfer interface
1746 * @v uri URI
1747 * @ret rc Return status code
1748 */
1749 static int tcp_open_uri ( struct interface *xfer, struct uri *uri ) {
1750 struct sockaddr_tcpip peer;
1751
1752 /* Sanity check */
1753 if ( ! uri->host )
1754 return -EINVAL;
1755
1756 memset ( &peer, 0, sizeof ( peer ) );
1757 peer.st_port = htons ( uri_port ( uri, 0 ) );
1758 return xfer_open_named_socket ( xfer, SOCK_STREAM,
1759 ( struct sockaddr * ) &peer,
1760 uri->host, NULL );
1761 }
1762
/** TCP URI opener
 *
 * Handles "tcp:" URIs via tcp_open_uri().
 */
struct uri_opener tcp_uri_opener __uri_opener = {
	.scheme = "tcp",
	.open = tcp_open_uri,
};
1768