[tcpip] Allow supported address families to be detected at runtime
[ipxe.git] / src / net / tcp.c
1 #include <string.h>
2 #include <stdlib.h>
3 #include <stdio.h>
4 #include <assert.h>
5 #include <errno.h>
6 #include <byteswap.h>
7 #include <ipxe/timer.h>
8 #include <ipxe/iobuf.h>
9 #include <ipxe/malloc.h>
10 #include <ipxe/init.h>
11 #include <ipxe/retry.h>
12 #include <ipxe/refcnt.h>
13 #include <ipxe/pending.h>
14 #include <ipxe/xfer.h>
15 #include <ipxe/open.h>
16 #include <ipxe/uri.h>
17 #include <ipxe/netdevice.h>
18 #include <ipxe/profile.h>
19 #include <ipxe/process.h>
20 #include <ipxe/tcpip.h>
21 #include <ipxe/tcp.h>
22
23 /** @file
24 *
25 * TCP protocol
26 *
27 */
28
29 FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
30
/** A TCP connection */
struct tcp_connection {
	/** Reference counter */
	struct refcnt refcnt;
	/** List of TCP connections (entry in the global tcp_conns list) */
	struct list_head list;

	/** Flags (bitmask of enum tcp_flags values) */
	unsigned int flags;

	/** Data transfer interface */
	struct interface xfer;

	/** Remote socket address */
	struct sockaddr_tcpip peer;
	/** Local port */
	unsigned int local_port;
	/** Maximum segment size (advertised to the peer in the SYN) */
	size_t mss;

	/** Current TCP state */
	unsigned int tcp_state;
	/** Previous TCP state
	 *
	 * Maintained only for debug messages
	 */
	unsigned int prev_tcp_state;
	/** Current sequence number
	 *
	 * Equivalent to SND.UNA in RFC 793 terminology.
	 */
	uint32_t snd_seq;
	/** Unacknowledged sequence count
	 *
	 * Equivalent to (SND.NXT-SND.UNA) in RFC 793 terminology.
	 */
	uint32_t snd_sent;
	/** Send window
	 *
	 * Equivalent to SND.WND in RFC 793 terminology
	 */
	uint32_t snd_win;
	/** Current acknowledgement number
	 *
	 * Equivalent to RCV.NXT in RFC 793 terminology.
	 */
	uint32_t rcv_ack;
	/** Receive window
	 *
	 * Equivalent to RCV.WND in RFC 793 terminology.
	 */
	uint32_t rcv_win;
	/** Received timestamp value
	 *
	 * Updated when a packet is received; copied to ts_recent when
	 * the window is advanced.
	 */
	uint32_t ts_val;
	/** Most recent received timestamp that advanced the window
	 *
	 * Equivalent to TS.Recent in RFC 1323 terminology.
	 */
	uint32_t ts_recent;
	/** Send window scale
	 *
	 * Equivalent to Snd.Wind.Scale in RFC 1323 terminology
	 */
	uint8_t snd_win_scale;
	/** Receive window scale
	 *
	 * Equivalent to Rcv.Wind.Scale in RFC 1323 terminology
	 */
	uint8_t rcv_win_scale;

	/** Selective acknowledgement list (in host-endian order) */
	struct tcp_sack_block sack[TCP_SACK_MAX];

	/** Transmit queue (I/O buffers awaiting acknowledgement) */
	struct list_head tx_queue;
	/** Receive queue (out-of-order I/O buffers awaiting delivery) */
	struct list_head rx_queue;
	/** Transmission process */
	struct process process;
	/** Retransmission timer */
	struct retry_timer timer;
	/** Shutdown (TIME_WAIT) timer */
	struct retry_timer wait;

	/** Pending operations for SYN and FIN */
	struct pending_operation pending_flags;
	/** Pending operations for transmit queue */
	struct pending_operation pending_data;
};
124
/** TCP flags
 *
 * Bitmask values stored in tcp_connection::flags.
 */
enum tcp_flags {
	/** TCP data transfer interface has been closed */
	TCP_XFER_CLOSED = 0x0001,
	/** TCP timestamps are enabled */
	TCP_TS_ENABLED = 0x0002,
	/** TCP acknowledgement is pending */
	TCP_ACK_PENDING = 0x0004,
	/** TCP selective acknowledgement is enabled */
	TCP_SACK_ENABLED = 0x0008,
};
136
/** TCP internal header
 *
 * This is the header that replaces the TCP header for packets
 * enqueued on the receive queue (stored at the start of each queued
 * I/O buffer's data).
 */
struct tcp_rx_queued_header {
	/** SEQ value, in host-endian order
	 *
	 * This represents the SEQ value at the time the packet is
	 * enqueued, and so excludes the SYN, if present.
	 */
	uint32_t seq;
	/** Next SEQ value, in host-endian order */
	uint32_t nxt;
	/** Flags
	 *
	 * Only FIN is valid within this flags byte; all other flags
	 * have already been processed by the time the packet is
	 * enqueued.
	 */
	uint8_t flags;
	/** Reserved */
	uint8_t reserved[3];
};
161
162 /**
163 * List of registered TCP connections
164 */
165 static LIST_HEAD ( tcp_conns );
166
167 /** Transmit profiler */
168 static struct profiler tcp_tx_profiler __profiler = { .name = "tcp.tx" };
169
170 /** Receive profiler */
171 static struct profiler tcp_rx_profiler __profiler = { .name = "tcp.rx" };
172
173 /** Data transfer profiler */
174 static struct profiler tcp_xfer_profiler __profiler = { .name = "tcp.xfer" };
175
176 /* Forward declarations */
177 static struct process_descriptor tcp_process_desc;
178 static struct interface_descriptor tcp_xfer_desc;
179 static void tcp_expired ( struct retry_timer *timer, int over );
180 static void tcp_wait_expired ( struct retry_timer *timer, int over );
181 static struct tcp_connection * tcp_demux ( unsigned int local_port );
182 static int tcp_rx_ack ( struct tcp_connection *tcp, uint32_t ack,
183 uint32_t win );
184
185 /**
186 * Name TCP state
187 *
188 * @v state TCP state
189 * @ret name Name of TCP state
190 */
191 static inline __attribute__ (( always_inline )) const char *
192 tcp_state ( int state ) {
193 switch ( state ) {
194 case TCP_CLOSED: return "CLOSED";
195 case TCP_LISTEN: return "LISTEN";
196 case TCP_SYN_SENT: return "SYN_SENT";
197 case TCP_SYN_RCVD: return "SYN_RCVD";
198 case TCP_ESTABLISHED: return "ESTABLISHED";
199 case TCP_FIN_WAIT_1: return "FIN_WAIT_1";
200 case TCP_FIN_WAIT_2: return "FIN_WAIT_2";
201 case TCP_CLOSING_OR_LAST_ACK: return "CLOSING/LAST_ACK";
202 case TCP_TIME_WAIT: return "TIME_WAIT";
203 case TCP_CLOSE_WAIT: return "CLOSE_WAIT";
204 default: return "INVALID";
205 }
206 }
207
208 /**
209 * Dump TCP state transition
210 *
211 * @v tcp TCP connection
212 */
213 static inline __attribute__ (( always_inline )) void
214 tcp_dump_state ( struct tcp_connection *tcp ) {
215
216 if ( tcp->tcp_state != tcp->prev_tcp_state ) {
217 DBGC ( tcp, "TCP %p transitioned from %s to %s\n", tcp,
218 tcp_state ( tcp->prev_tcp_state ),
219 tcp_state ( tcp->tcp_state ) );
220 }
221 tcp->prev_tcp_state = tcp->tcp_state;
222 }
223
224 /**
225 * Dump TCP flags
226 *
227 * @v flags TCP flags
228 */
229 static inline __attribute__ (( always_inline )) void
230 tcp_dump_flags ( struct tcp_connection *tcp, unsigned int flags ) {
231 if ( flags & TCP_RST )
232 DBGC2 ( tcp, " RST" );
233 if ( flags & TCP_SYN )
234 DBGC2 ( tcp, " SYN" );
235 if ( flags & TCP_PSH )
236 DBGC2 ( tcp, " PSH" );
237 if ( flags & TCP_FIN )
238 DBGC2 ( tcp, " FIN" );
239 if ( flags & TCP_ACK )
240 DBGC2 ( tcp, " ACK" );
241 }
242
243 /***************************************************************************
244 *
245 * Open and close
246 *
247 ***************************************************************************
248 */
249
250 /**
251 * Check if local TCP port is available
252 *
253 * @v port Local port number
254 * @ret port Local port number, or negative error
255 */
256 static int tcp_port_available ( int port ) {
257
258 return ( tcp_demux ( port ) ? -EADDRINUSE : port );
259 }
260
261 /**
262 * Open a TCP connection
263 *
264 * @v xfer Data transfer interface
265 * @v peer Peer socket address
266 * @v local Local socket address, or NULL
267 * @ret rc Return status code
268 */
269 static int tcp_open ( struct interface *xfer, struct sockaddr *peer,
270 struct sockaddr *local ) {
271 struct sockaddr_tcpip *st_peer = ( struct sockaddr_tcpip * ) peer;
272 struct sockaddr_tcpip *st_local = ( struct sockaddr_tcpip * ) local;
273 struct tcp_connection *tcp;
274 size_t mtu;
275 int port;
276 int rc;
277
278 /* Allocate and initialise structure */
279 tcp = zalloc ( sizeof ( *tcp ) );
280 if ( ! tcp )
281 return -ENOMEM;
282 DBGC ( tcp, "TCP %p allocated\n", tcp );
283 ref_init ( &tcp->refcnt, NULL );
284 intf_init ( &tcp->xfer, &tcp_xfer_desc, &tcp->refcnt );
285 process_init_stopped ( &tcp->process, &tcp_process_desc, &tcp->refcnt );
286 timer_init ( &tcp->timer, tcp_expired, &tcp->refcnt );
287 timer_init ( &tcp->wait, tcp_wait_expired, &tcp->refcnt );
288 tcp->prev_tcp_state = TCP_CLOSED;
289 tcp->tcp_state = TCP_STATE_SENT ( TCP_SYN );
290 tcp_dump_state ( tcp );
291 tcp->snd_seq = random();
292 INIT_LIST_HEAD ( &tcp->tx_queue );
293 INIT_LIST_HEAD ( &tcp->rx_queue );
294 memcpy ( &tcp->peer, st_peer, sizeof ( tcp->peer ) );
295
296 /* Calculate MSS */
297 mtu = tcpip_mtu ( &tcp->peer );
298 if ( ! mtu ) {
299 DBGC ( tcp, "TCP %p has no route to %s\n",
300 tcp, sock_ntoa ( peer ) );
301 rc = -ENETUNREACH;
302 goto err;
303 }
304 tcp->mss = ( mtu - sizeof ( struct tcp_header ) );
305
306 /* Bind to local port */
307 port = tcpip_bind ( st_local, tcp_port_available );
308 if ( port < 0 ) {
309 rc = port;
310 DBGC ( tcp, "TCP %p could not bind: %s\n",
311 tcp, strerror ( rc ) );
312 goto err;
313 }
314 tcp->local_port = port;
315 DBGC ( tcp, "TCP %p bound to port %d\n", tcp, tcp->local_port );
316
317 /* Start timer to initiate SYN */
318 start_timer_nodelay ( &tcp->timer );
319
320 /* Add a pending operation for the SYN */
321 pending_get ( &tcp->pending_flags );
322
323 /* Attach parent interface, transfer reference to connection
324 * list and return
325 */
326 intf_plug_plug ( &tcp->xfer, xfer );
327 list_add ( &tcp->list, &tcp_conns );
328 return 0;
329
330 err:
331 ref_put ( &tcp->refcnt );
332 return rc;
333 }
334
335 /**
336 * Close TCP connection
337 *
338 * @v tcp TCP connection
339 * @v rc Reason for close
340 *
341 * Closes the data transfer interface. If the TCP state machine is in
342 * a suitable state, the connection will be deleted.
343 */
344 static void tcp_close ( struct tcp_connection *tcp, int rc ) {
345 struct io_buffer *iobuf;
346 struct io_buffer *tmp;
347
348 /* Close data transfer interface */
349 intf_shutdown ( &tcp->xfer, rc );
350 tcp->flags |= TCP_XFER_CLOSED;
351
352 /* If we are in CLOSED, or have otherwise not yet received a
353 * SYN (i.e. we are in LISTEN or SYN_SENT), just delete the
354 * connection.
355 */
356 if ( ! ( tcp->tcp_state & TCP_STATE_RCVD ( TCP_SYN ) ) ) {
357
358 /* Transition to CLOSED for the sake of debugging messages */
359 tcp->tcp_state = TCP_CLOSED;
360 tcp_dump_state ( tcp );
361
362 /* Free any unprocessed I/O buffers */
363 list_for_each_entry_safe ( iobuf, tmp, &tcp->rx_queue, list ) {
364 list_del ( &iobuf->list );
365 free_iob ( iobuf );
366 }
367
368 /* Free any unsent I/O buffers */
369 list_for_each_entry_safe ( iobuf, tmp, &tcp->tx_queue, list ) {
370 list_del ( &iobuf->list );
371 free_iob ( iobuf );
372 pending_put ( &tcp->pending_data );
373 }
374 assert ( ! is_pending ( &tcp->pending_data ) );
375
376 /* Remove pending operations for SYN and FIN, if applicable */
377 pending_put ( &tcp->pending_flags );
378 pending_put ( &tcp->pending_flags );
379
380 /* Remove from list and drop reference */
381 process_del ( &tcp->process );
382 stop_timer ( &tcp->timer );
383 stop_timer ( &tcp->wait );
384 list_del ( &tcp->list );
385 ref_put ( &tcp->refcnt );
386 DBGC ( tcp, "TCP %p connection deleted\n", tcp );
387 return;
388 }
389
390 /* If we have not had our SYN acknowledged (i.e. we are in
391 * SYN_RCVD), pretend that it has been acknowledged so that we
392 * can send a FIN without breaking things.
393 */
394 if ( ! ( tcp->tcp_state & TCP_STATE_ACKED ( TCP_SYN ) ) )
395 tcp_rx_ack ( tcp, ( tcp->snd_seq + 1 ), 0 );
396
397 /* If we have no data remaining to send, start sending FIN */
398 if ( list_empty ( &tcp->tx_queue ) &&
399 ! ( tcp->tcp_state & TCP_STATE_SENT ( TCP_FIN ) ) ) {
400
401 tcp->tcp_state |= TCP_STATE_SENT ( TCP_FIN );
402 tcp_dump_state ( tcp );
403 process_add ( &tcp->process );
404
405 /* Add a pending operation for the FIN */
406 pending_get ( &tcp->pending_flags );
407 }
408 }
409
410 /***************************************************************************
411 *
412 * Transmit data path
413 *
414 ***************************************************************************
415 */
416
417 /**
418 * Calculate transmission window
419 *
420 * @v tcp TCP connection
421 * @ret len Maximum length that can be sent in a single packet
422 */
423 static size_t tcp_xmit_win ( struct tcp_connection *tcp ) {
424 size_t len;
425
426 /* Not ready if we're not in a suitable connection state */
427 if ( ! TCP_CAN_SEND_DATA ( tcp->tcp_state ) )
428 return 0;
429
430 /* Length is the minimum of the receiver's window and the path MTU */
431 len = tcp->snd_win;
432 if ( len > TCP_PATH_MTU )
433 len = TCP_PATH_MTU;
434
435 return len;
436 }
437
438 /**
439 * Check data-transfer flow control window
440 *
441 * @v tcp TCP connection
442 * @ret len Length of window
443 */
444 static size_t tcp_xfer_window ( struct tcp_connection *tcp ) {
445
446 /* Not ready if data queue is non-empty. This imposes a limit
447 * of only one unACKed packet in the TX queue at any time; we
448 * do this to conserve memory usage.
449 */
450 if ( ! list_empty ( &tcp->tx_queue ) )
451 return 0;
452
453 /* Return TCP window length */
454 return tcp_xmit_win ( tcp );
455 }
456
457 /**
458 * Find selective acknowledgement block
459 *
460 * @v tcp TCP connection
461 * @v seq SEQ value in SACK block (in host-endian order)
462 * @v sack SACK block to fill in (in host-endian order)
463 * @ret len Length of SACK block
464 */
465 static uint32_t tcp_sack_block ( struct tcp_connection *tcp, uint32_t seq,
466 struct tcp_sack_block *sack ) {
467 struct io_buffer *iobuf;
468 struct tcp_rx_queued_header *tcpqhdr;
469 uint32_t left = tcp->rcv_ack;
470 uint32_t right = left;
471
472 /* Find highest block which does not start after SEQ */
473 list_for_each_entry ( iobuf, &tcp->rx_queue, list ) {
474 tcpqhdr = iobuf->data;
475 if ( tcp_cmp ( tcpqhdr->seq, right ) > 0 ) {
476 if ( tcp_cmp ( tcpqhdr->seq, seq ) > 0 )
477 break;
478 left = tcpqhdr->seq;
479 }
480 if ( tcp_cmp ( tcpqhdr->nxt, right ) > 0 )
481 right = tcpqhdr->nxt;
482 }
483
484 /* Fail if this block does not contain SEQ */
485 if ( tcp_cmp ( right, seq ) < 0 )
486 return 0;
487
488 /* Populate SACK block */
489 sack->left = left;
490 sack->right = right;
491 return ( right - left );
492 }
493
494 /**
495 * Update TCP selective acknowledgement list
496 *
497 * @v tcp TCP connection
498 * @v seq SEQ value in first SACK block (in host-endian order)
499 * @ret count Number of SACK blocks
500 */
501 static unsigned int tcp_sack ( struct tcp_connection *tcp, uint32_t seq ) {
502 struct tcp_sack_block sack[TCP_SACK_MAX];
503 unsigned int old = 0;
504 unsigned int new = 0;
505 unsigned int i;
506 uint32_t len;
507
508 /* Populate first new SACK block */
509 len = tcp_sack_block ( tcp, seq, &sack[0] );
510 if ( len )
511 new++;
512
513 /* Populate remaining new SACK blocks based on old SACK blocks */
514 for ( old = 0 ; old < TCP_SACK_MAX ; old++ ) {
515
516 /* Stop if we run out of space in the new list */
517 if ( new == TCP_SACK_MAX )
518 break;
519
520 /* Skip empty old SACK blocks */
521 if ( tcp->sack[old].left == tcp->sack[old].right )
522 continue;
523
524 /* Populate new SACK block */
525 len = tcp_sack_block ( tcp, tcp->sack[old].left, &sack[new] );
526 if ( len == 0 )
527 continue;
528
529 /* Eliminate duplicates */
530 for ( i = 0 ; i < new ; i++ ) {
531 if ( sack[i].left == sack[new].left ) {
532 new--;
533 break;
534 }
535 }
536 new++;
537 }
538
539 /* Update SACK list */
540 memset ( tcp->sack, 0, sizeof ( tcp->sack ) );
541 memcpy ( tcp->sack, sack, ( new * sizeof ( tcp->sack[0] ) ) );
542 return new;
543 }
544
545 /**
546 * Process TCP transmit queue
547 *
548 * @v tcp TCP connection
549 * @v max_len Maximum length to process
550 * @v dest I/O buffer to fill with data, or NULL
551 * @v remove Remove data from queue
552 * @ret len Length of data processed
553 *
554 * This processes at most @c max_len bytes from the TCP connection's
555 * transmit queue. Data will be copied into the @c dest I/O buffer
556 * (if provided) and, if @c remove is true, removed from the transmit
557 * queue.
558 */
559 static size_t tcp_process_tx_queue ( struct tcp_connection *tcp, size_t max_len,
560 struct io_buffer *dest, int remove ) {
561 struct io_buffer *iobuf;
562 struct io_buffer *tmp;
563 size_t frag_len;
564 size_t len = 0;
565
566 list_for_each_entry_safe ( iobuf, tmp, &tcp->tx_queue, list ) {
567 frag_len = iob_len ( iobuf );
568 if ( frag_len > max_len )
569 frag_len = max_len;
570 if ( dest ) {
571 memcpy ( iob_put ( dest, frag_len ), iobuf->data,
572 frag_len );
573 }
574 if ( remove ) {
575 iob_pull ( iobuf, frag_len );
576 if ( ! iob_len ( iobuf ) ) {
577 list_del ( &iobuf->list );
578 free_iob ( iobuf );
579 pending_put ( &tcp->pending_data );
580 }
581 }
582 len += frag_len;
583 max_len -= frag_len;
584 }
585 return len;
586 }
587
588 /**
589 * Transmit any outstanding data (with selective acknowledgement)
590 *
591 * @v tcp TCP connection
592 * @v sack_seq SEQ for first selective acknowledgement (if any)
593 *
594 * Transmits any outstanding data on the connection.
595 *
596 * Note that even if an error is returned, the retransmission timer
597 * will have been started if necessary, and so the stack will
598 * eventually attempt to retransmit the failed packet.
599 */
600 static void tcp_xmit_sack ( struct tcp_connection *tcp, uint32_t sack_seq ) {
601 struct io_buffer *iobuf;
602 struct tcp_header *tcphdr;
603 struct tcp_mss_option *mssopt;
604 struct tcp_window_scale_padded_option *wsopt;
605 struct tcp_timestamp_padded_option *tsopt;
606 struct tcp_sack_permitted_padded_option *spopt;
607 struct tcp_sack_padded_option *sackopt;
608 struct tcp_sack_block *sack;
609 void *payload;
610 unsigned int flags;
611 unsigned int sack_count;
612 unsigned int i;
613 size_t len = 0;
614 size_t sack_len;
615 uint32_t seq_len;
616 uint32_t max_rcv_win;
617 uint32_t max_representable_win;
618 int rc;
619
620 /* Start profiling */
621 profile_start ( &tcp_tx_profiler );
622
623 /* If retransmission timer is already running, do nothing */
624 if ( timer_running ( &tcp->timer ) )
625 return;
626
627 /* Calculate both the actual (payload) and sequence space
628 * lengths that we wish to transmit.
629 */
630 if ( TCP_CAN_SEND_DATA ( tcp->tcp_state ) ) {
631 len = tcp_process_tx_queue ( tcp, tcp_xmit_win ( tcp ),
632 NULL, 0 );
633 }
634 seq_len = len;
635 flags = TCP_FLAGS_SENDING ( tcp->tcp_state );
636 if ( flags & ( TCP_SYN | TCP_FIN ) ) {
637 /* SYN or FIN consume one byte, and we can never send both */
638 assert ( ! ( ( flags & TCP_SYN ) && ( flags & TCP_FIN ) ) );
639 seq_len++;
640 }
641 tcp->snd_sent = seq_len;
642
643 /* If we have nothing to transmit, stop now */
644 if ( ( seq_len == 0 ) && ! ( tcp->flags & TCP_ACK_PENDING ) )
645 return;
646
647 /* If we are transmitting anything that requires
648 * acknowledgement (i.e. consumes sequence space), start the
649 * retransmission timer. Do this before attempting to
650 * allocate the I/O buffer, in case allocation itself fails.
651 */
652 if ( seq_len )
653 start_timer ( &tcp->timer );
654
655 /* Allocate I/O buffer */
656 iobuf = alloc_iob ( len + TCP_MAX_HEADER_LEN );
657 if ( ! iobuf ) {
658 DBGC ( tcp, "TCP %p could not allocate iobuf for %08x..%08x "
659 "%08x\n", tcp, tcp->snd_seq, ( tcp->snd_seq + seq_len ),
660 tcp->rcv_ack );
661 return;
662 }
663 iob_reserve ( iobuf, TCP_MAX_HEADER_LEN );
664
665 /* Fill data payload from transmit queue */
666 tcp_process_tx_queue ( tcp, len, iobuf, 0 );
667
668 /* Expand receive window if possible */
669 max_rcv_win = xfer_window ( &tcp->xfer );
670 if ( max_rcv_win > TCP_MAX_WINDOW_SIZE )
671 max_rcv_win = TCP_MAX_WINDOW_SIZE;
672 max_representable_win = ( 0xffff << tcp->rcv_win_scale );
673 if ( max_rcv_win > max_representable_win )
674 max_rcv_win = max_representable_win;
675 max_rcv_win &= ~0x03; /* Keep everything dword-aligned */
676 if ( tcp->rcv_win < max_rcv_win )
677 tcp->rcv_win = max_rcv_win;
678
679 /* Fill up the TCP header */
680 payload = iobuf->data;
681 if ( flags & TCP_SYN ) {
682 mssopt = iob_push ( iobuf, sizeof ( *mssopt ) );
683 mssopt->kind = TCP_OPTION_MSS;
684 mssopt->length = sizeof ( *mssopt );
685 mssopt->mss = htons ( tcp->mss );
686 wsopt = iob_push ( iobuf, sizeof ( *wsopt ) );
687 wsopt->nop = TCP_OPTION_NOP;
688 wsopt->wsopt.kind = TCP_OPTION_WS;
689 wsopt->wsopt.length = sizeof ( wsopt->wsopt );
690 wsopt->wsopt.scale = TCP_RX_WINDOW_SCALE;
691 spopt = iob_push ( iobuf, sizeof ( *spopt ) );
692 memset ( spopt->nop, TCP_OPTION_NOP, sizeof ( spopt ) );
693 spopt->spopt.kind = TCP_OPTION_SACK_PERMITTED;
694 spopt->spopt.length = sizeof ( spopt->spopt );
695 }
696 if ( ( flags & TCP_SYN ) || ( tcp->flags & TCP_TS_ENABLED ) ) {
697 tsopt = iob_push ( iobuf, sizeof ( *tsopt ) );
698 memset ( tsopt->nop, TCP_OPTION_NOP, sizeof ( tsopt->nop ) );
699 tsopt->tsopt.kind = TCP_OPTION_TS;
700 tsopt->tsopt.length = sizeof ( tsopt->tsopt );
701 tsopt->tsopt.tsval = htonl ( currticks() );
702 tsopt->tsopt.tsecr = htonl ( tcp->ts_recent );
703 }
704 if ( ( tcp->flags & TCP_SACK_ENABLED ) &&
705 ( ! list_empty ( &tcp->rx_queue ) ) &&
706 ( ( sack_count = tcp_sack ( tcp, sack_seq ) ) != 0 ) ) {
707 sack_len = ( sack_count * sizeof ( *sack ) );
708 sackopt = iob_push ( iobuf, ( sizeof ( *sackopt ) + sack_len ));
709 memset ( sackopt->nop, TCP_OPTION_NOP, sizeof ( sackopt->nop ));
710 sackopt->sackopt.kind = TCP_OPTION_SACK;
711 sackopt->sackopt.length =
712 ( sizeof ( sackopt->sackopt ) + sack_len );
713 sack = ( ( ( void * ) sackopt ) + sizeof ( *sackopt ) );
714 for ( i = 0 ; i < sack_count ; i++, sack++ ) {
715 sack->left = htonl ( tcp->sack[i].left );
716 sack->right = htonl ( tcp->sack[i].right );
717 }
718 }
719 if ( len != 0 )
720 flags |= TCP_PSH;
721 tcphdr = iob_push ( iobuf, sizeof ( *tcphdr ) );
722 memset ( tcphdr, 0, sizeof ( *tcphdr ) );
723 tcphdr->src = htons ( tcp->local_port );
724 tcphdr->dest = tcp->peer.st_port;
725 tcphdr->seq = htonl ( tcp->snd_seq );
726 tcphdr->ack = htonl ( tcp->rcv_ack );
727 tcphdr->hlen = ( ( payload - iobuf->data ) << 2 );
728 tcphdr->flags = flags;
729 tcphdr->win = htons ( tcp->rcv_win >> tcp->rcv_win_scale );
730 tcphdr->csum = tcpip_chksum ( iobuf->data, iob_len ( iobuf ) );
731
732 /* Dump header */
733 DBGC2 ( tcp, "TCP %p TX %d->%d %08x..%08x %08x %4zd",
734 tcp, ntohs ( tcphdr->src ), ntohs ( tcphdr->dest ),
735 ntohl ( tcphdr->seq ), ( ntohl ( tcphdr->seq ) + seq_len ),
736 ntohl ( tcphdr->ack ), len );
737 tcp_dump_flags ( tcp, tcphdr->flags );
738 DBGC2 ( tcp, "\n" );
739
740 /* Transmit packet */
741 if ( ( rc = tcpip_tx ( iobuf, &tcp_protocol, NULL, &tcp->peer, NULL,
742 &tcphdr->csum ) ) != 0 ) {
743 DBGC ( tcp, "TCP %p could not transmit %08x..%08x %08x: %s\n",
744 tcp, tcp->snd_seq, ( tcp->snd_seq + tcp->snd_sent ),
745 tcp->rcv_ack, strerror ( rc ) );
746 return;
747 }
748
749 /* Clear ACK-pending flag */
750 tcp->flags &= ~TCP_ACK_PENDING;
751
752 profile_stop ( &tcp_tx_profiler );
753 }
754
755 /**
756 * Transmit any outstanding data
757 *
758 * @v tcp TCP connection
759 */
760 static void tcp_xmit ( struct tcp_connection *tcp ) {
761
762 /* Transmit without an explicit first SACK */
763 tcp_xmit_sack ( tcp, tcp->rcv_ack );
764 }
765
/** TCP process descriptor (runs tcp_xmit once per schedule) */
static struct process_descriptor tcp_process_desc =
	PROC_DESC_ONCE ( struct tcp_connection, process, tcp_xmit );
769
770 /**
771 * Retransmission timer expired
772 *
773 * @v timer Retransmission timer
774 * @v over Failure indicator
775 */
776 static void tcp_expired ( struct retry_timer *timer, int over ) {
777 struct tcp_connection *tcp =
778 container_of ( timer, struct tcp_connection, timer );
779
780 DBGC ( tcp, "TCP %p timer %s in %s for %08x..%08x %08x\n", tcp,
781 ( over ? "expired" : "fired" ), tcp_state ( tcp->tcp_state ),
782 tcp->snd_seq, ( tcp->snd_seq + tcp->snd_sent ), tcp->rcv_ack );
783
784 assert ( ( tcp->tcp_state == TCP_SYN_SENT ) ||
785 ( tcp->tcp_state == TCP_SYN_RCVD ) ||
786 ( tcp->tcp_state == TCP_ESTABLISHED ) ||
787 ( tcp->tcp_state == TCP_FIN_WAIT_1 ) ||
788 ( tcp->tcp_state == TCP_CLOSE_WAIT ) ||
789 ( tcp->tcp_state == TCP_CLOSING_OR_LAST_ACK ) );
790
791 if ( over ) {
792 /* If we have finally timed out and given up,
793 * terminate the connection
794 */
795 tcp->tcp_state = TCP_CLOSED;
796 tcp_dump_state ( tcp );
797 tcp_close ( tcp, -ETIMEDOUT );
798 } else {
799 /* Otherwise, retransmit the packet */
800 tcp_xmit ( tcp );
801 }
802 }
803
804 /**
805 * Shutdown timer expired
806 *
807 * @v timer Shutdown timer
808 * @v over Failure indicator
809 */
810 static void tcp_wait_expired ( struct retry_timer *timer, int over __unused ) {
811 struct tcp_connection *tcp =
812 container_of ( timer, struct tcp_connection, wait );
813
814 assert ( tcp->tcp_state == TCP_TIME_WAIT );
815
816 DBGC ( tcp, "TCP %p wait complete in %s for %08x..%08x %08x\n", tcp,
817 tcp_state ( tcp->tcp_state ), tcp->snd_seq,
818 ( tcp->snd_seq + tcp->snd_sent ), tcp->rcv_ack );
819
820 tcp->tcp_state = TCP_CLOSED;
821 tcp_dump_state ( tcp );
822 tcp_close ( tcp, 0 );
823 }
824
825 /**
826 * Send RST response to incoming packet
827 *
828 * @v in_tcphdr TCP header of incoming packet
829 * @ret rc Return status code
830 */
831 static int tcp_xmit_reset ( struct tcp_connection *tcp,
832 struct sockaddr_tcpip *st_dest,
833 struct tcp_header *in_tcphdr ) {
834 struct io_buffer *iobuf;
835 struct tcp_header *tcphdr;
836 int rc;
837
838 /* Allocate space for dataless TX buffer */
839 iobuf = alloc_iob ( TCP_MAX_HEADER_LEN );
840 if ( ! iobuf ) {
841 DBGC ( tcp, "TCP %p could not allocate iobuf for RST "
842 "%08x..%08x %08x\n", tcp, ntohl ( in_tcphdr->ack ),
843 ntohl ( in_tcphdr->ack ), ntohl ( in_tcphdr->seq ) );
844 return -ENOMEM;
845 }
846 iob_reserve ( iobuf, TCP_MAX_HEADER_LEN );
847
848 /* Construct RST response */
849 tcphdr = iob_push ( iobuf, sizeof ( *tcphdr ) );
850 memset ( tcphdr, 0, sizeof ( *tcphdr ) );
851 tcphdr->src = in_tcphdr->dest;
852 tcphdr->dest = in_tcphdr->src;
853 tcphdr->seq = in_tcphdr->ack;
854 tcphdr->ack = in_tcphdr->seq;
855 tcphdr->hlen = ( ( sizeof ( *tcphdr ) / 4 ) << 4 );
856 tcphdr->flags = ( TCP_RST | TCP_ACK );
857 tcphdr->win = htons ( 0 );
858 tcphdr->csum = tcpip_chksum ( iobuf->data, iob_len ( iobuf ) );
859
860 /* Dump header */
861 DBGC2 ( tcp, "TCP %p TX %d->%d %08x..%08x %08x %4d",
862 tcp, ntohs ( tcphdr->src ), ntohs ( tcphdr->dest ),
863 ntohl ( tcphdr->seq ), ( ntohl ( tcphdr->seq ) ),
864 ntohl ( tcphdr->ack ), 0 );
865 tcp_dump_flags ( tcp, tcphdr->flags );
866 DBGC2 ( tcp, "\n" );
867
868 /* Transmit packet */
869 if ( ( rc = tcpip_tx ( iobuf, &tcp_protocol, NULL, st_dest,
870 NULL, &tcphdr->csum ) ) != 0 ) {
871 DBGC ( tcp, "TCP %p could not transmit RST %08x..%08x %08x: "
872 "%s\n", tcp, ntohl ( in_tcphdr->ack ),
873 ntohl ( in_tcphdr->ack ), ntohl ( in_tcphdr->seq ),
874 strerror ( rc ) );
875 return rc;
876 }
877
878 return 0;
879 }
880
881 /***************************************************************************
882 *
883 * Receive data path
884 *
885 ***************************************************************************
886 */
887
888 /**
889 * Identify TCP connection by local port number
890 *
891 * @v local_port Local port
892 * @ret tcp TCP connection, or NULL
893 */
894 static struct tcp_connection * tcp_demux ( unsigned int local_port ) {
895 struct tcp_connection *tcp;
896
897 list_for_each_entry ( tcp, &tcp_conns, list ) {
898 if ( tcp->local_port == local_port )
899 return tcp;
900 }
901 return NULL;
902 }
903
904 /**
905 * Parse TCP received options
906 *
907 * @v tcp TCP connection
908 * @v data Raw options data
909 * @v len Raw options length
910 * @v options Options structure to fill in
911 */
912 static void tcp_rx_opts ( struct tcp_connection *tcp, const void *data,
913 size_t len, struct tcp_options *options ) {
914 const void *end = ( data + len );
915 const struct tcp_option *option;
916 unsigned int kind;
917
918 memset ( options, 0, sizeof ( *options ) );
919 while ( data < end ) {
920 option = data;
921 kind = option->kind;
922 if ( kind == TCP_OPTION_END )
923 return;
924 if ( kind == TCP_OPTION_NOP ) {
925 data++;
926 continue;
927 }
928 switch ( kind ) {
929 case TCP_OPTION_MSS:
930 options->mssopt = data;
931 break;
932 case TCP_OPTION_WS:
933 options->wsopt = data;
934 break;
935 case TCP_OPTION_SACK_PERMITTED:
936 options->spopt = data;
937 break;
938 case TCP_OPTION_SACK:
939 /* Ignore received SACKs */
940 break;
941 case TCP_OPTION_TS:
942 options->tsopt = data;
943 break;
944 default:
945 DBGC ( tcp, "TCP %p received unknown option %d\n",
946 tcp, kind );
947 break;
948 }
949 data += option->length;
950 }
951 }
952
953 /**
954 * Consume received sequence space
955 *
956 * @v tcp TCP connection
957 * @v seq_len Sequence space length to consume
958 */
959 static void tcp_rx_seq ( struct tcp_connection *tcp, uint32_t seq_len ) {
960 unsigned int sack;
961
962 /* Sanity check */
963 assert ( seq_len > 0 );
964
965 /* Update acknowledgement number */
966 tcp->rcv_ack += seq_len;
967
968 /* Update window */
969 if ( tcp->rcv_win > seq_len ) {
970 tcp->rcv_win -= seq_len;
971 } else {
972 tcp->rcv_win = 0;
973 }
974
975 /* Update timestamp */
976 tcp->ts_recent = tcp->ts_val;
977
978 /* Update SACK list */
979 for ( sack = 0 ; sack < TCP_SACK_MAX ; sack++ ) {
980 if ( tcp->sack[sack].left == tcp->sack[sack].right )
981 continue;
982 if ( tcp_cmp ( tcp->sack[sack].left, tcp->rcv_ack ) < 0 )
983 tcp->sack[sack].left = tcp->rcv_ack;
984 if ( tcp_cmp ( tcp->sack[sack].right, tcp->rcv_ack ) < 0 )
985 tcp->sack[sack].right = tcp->rcv_ack;
986 }
987
988 /* Mark ACK as pending */
989 tcp->flags |= TCP_ACK_PENDING;
990 }
991
992 /**
993 * Handle TCP received SYN
994 *
995 * @v tcp TCP connection
996 * @v seq SEQ value (in host-endian order)
997 * @v options TCP options
998 * @ret rc Return status code
999 */
1000 static int tcp_rx_syn ( struct tcp_connection *tcp, uint32_t seq,
1001 struct tcp_options *options ) {
1002
1003 /* Synchronise sequence numbers on first SYN */
1004 if ( ! ( tcp->tcp_state & TCP_STATE_RCVD ( TCP_SYN ) ) ) {
1005 tcp->rcv_ack = seq;
1006 if ( options->tsopt )
1007 tcp->flags |= TCP_TS_ENABLED;
1008 if ( options->spopt )
1009 tcp->flags |= TCP_SACK_ENABLED;
1010 if ( options->wsopt ) {
1011 tcp->snd_win_scale = options->wsopt->scale;
1012 tcp->rcv_win_scale = TCP_RX_WINDOW_SCALE;
1013 }
1014 }
1015
1016 /* Ignore duplicate SYN */
1017 if ( seq != tcp->rcv_ack )
1018 return 0;
1019
1020 /* Acknowledge SYN */
1021 tcp_rx_seq ( tcp, 1 );
1022
1023 /* Mark SYN as received and start sending ACKs with each packet */
1024 tcp->tcp_state |= ( TCP_STATE_SENT ( TCP_ACK ) |
1025 TCP_STATE_RCVD ( TCP_SYN ) );
1026
1027 return 0;
1028 }
1029
1030 /**
1031 * Handle TCP received ACK
1032 *
1033 * @v tcp TCP connection
1034 * @v ack ACK value (in host-endian order)
1035 * @v win WIN value (in host-endian order)
1036 * @ret rc Return status code
1037 */
1038 static int tcp_rx_ack ( struct tcp_connection *tcp, uint32_t ack,
1039 uint32_t win ) {
1040 uint32_t ack_len = ( ack - tcp->snd_seq );
1041 size_t len;
1042 unsigned int acked_flags;
1043
1044 /* Check for out-of-range or old duplicate ACKs */
1045 if ( ack_len > tcp->snd_sent ) {
1046 DBGC ( tcp, "TCP %p received ACK for %08x..%08x, "
1047 "sent only %08x..%08x\n", tcp, tcp->snd_seq,
1048 ( tcp->snd_seq + ack_len ), tcp->snd_seq,
1049 ( tcp->snd_seq + tcp->snd_sent ) );
1050
1051 if ( TCP_HAS_BEEN_ESTABLISHED ( tcp->tcp_state ) ) {
1052 /* Just ignore what might be old duplicate ACKs */
1053 return 0;
1054 } else {
1055 /* Send RST if an out-of-range ACK is received
1056 * on a not-yet-established connection, as per
1057 * RFC 793.
1058 */
1059 return -EINVAL;
1060 }
1061 }
1062
1063 /* Update window size */
1064 tcp->snd_win = win;
1065
1066 /* Ignore ACKs that don't actually acknowledge any new data.
1067 * (In particular, do not stop the retransmission timer; this
1068 * avoids creating a sorceror's apprentice syndrome when a
1069 * duplicate ACK is received and we still have data in our
1070 * transmit queue.)
1071 */
1072 if ( ack_len == 0 )
1073 return 0;
1074
1075 /* Stop the retransmission timer */
1076 stop_timer ( &tcp->timer );
1077
1078 /* Determine acknowledged flags and data length */
1079 len = ack_len;
1080 acked_flags = ( TCP_FLAGS_SENDING ( tcp->tcp_state ) &
1081 ( TCP_SYN | TCP_FIN ) );
1082 if ( acked_flags ) {
1083 len--;
1084 pending_put ( &tcp->pending_flags );
1085 }
1086
1087 /* Update SEQ and sent counters */
1088 tcp->snd_seq = ack;
1089 tcp->snd_sent = 0;
1090
1091 /* Remove any acknowledged data from transmit queue */
1092 tcp_process_tx_queue ( tcp, len, NULL, 1 );
1093
1094 /* Mark SYN/FIN as acknowledged if applicable. */
1095 if ( acked_flags )
1096 tcp->tcp_state |= TCP_STATE_ACKED ( acked_flags );
1097
1098 /* Start sending FIN if we've had all possible data ACKed */
1099 if ( list_empty ( &tcp->tx_queue ) &&
1100 ( tcp->flags & TCP_XFER_CLOSED ) &&
1101 ! ( tcp->tcp_state & TCP_STATE_SENT ( TCP_FIN ) ) ) {
1102 tcp->tcp_state |= TCP_STATE_SENT ( TCP_FIN );
1103 pending_get ( &tcp->pending_flags );
1104 }
1105
1106 return 0;
1107 }
1108
1109 /**
1110 * Handle TCP received data
1111 *
1112 * @v tcp TCP connection
1113 * @v seq SEQ value (in host-endian order)
1114 * @v iobuf I/O buffer
1115 * @ret rc Return status code
1116 *
1117 * This function takes ownership of the I/O buffer.
1118 */
1119 static int tcp_rx_data ( struct tcp_connection *tcp, uint32_t seq,
1120 struct io_buffer *iobuf ) {
1121 uint32_t already_rcvd;
1122 uint32_t len;
1123 int rc;
1124
1125 /* Ignore duplicate or out-of-order data */
1126 already_rcvd = ( tcp->rcv_ack - seq );
1127 len = iob_len ( iobuf );
1128 if ( already_rcvd >= len ) {
1129 free_iob ( iobuf );
1130 return 0;
1131 }
1132 iob_pull ( iobuf, already_rcvd );
1133 len -= already_rcvd;
1134
1135 /* Acknowledge new data */
1136 tcp_rx_seq ( tcp, len );
1137
1138 /* Deliver data to application */
1139 profile_start ( &tcp_xfer_profiler );
1140 if ( ( rc = xfer_deliver_iob ( &tcp->xfer, iobuf ) ) != 0 ) {
1141 DBGC ( tcp, "TCP %p could not deliver %08x..%08x: %s\n",
1142 tcp, seq, ( seq + len ), strerror ( rc ) );
1143 return rc;
1144 }
1145 profile_stop ( &tcp_xfer_profiler );
1146
1147 return 0;
1148 }
1149
1150 /**
1151 * Handle TCP received FIN
1152 *
1153 * @v tcp TCP connection
1154 * @v seq SEQ value (in host-endian order)
1155 * @ret rc Return status code
1156 */
1157 static int tcp_rx_fin ( struct tcp_connection *tcp, uint32_t seq ) {
1158
1159 /* Ignore duplicate or out-of-order FIN */
1160 if ( seq != tcp->rcv_ack )
1161 return 0;
1162
1163 /* Acknowledge FIN */
1164 tcp_rx_seq ( tcp, 1 );
1165
1166 /* Mark FIN as received */
1167 tcp->tcp_state |= TCP_STATE_RCVD ( TCP_FIN );
1168
1169 /* Close connection */
1170 tcp_close ( tcp, 0 );
1171
1172 return 0;
1173 }
1174
1175 /**
1176 * Handle TCP received RST
1177 *
1178 * @v tcp TCP connection
1179 * @v seq SEQ value (in host-endian order)
1180 * @ret rc Return status code
1181 */
1182 static int tcp_rx_rst ( struct tcp_connection *tcp, uint32_t seq ) {
1183
1184 /* Accept RST only if it falls within the window. If we have
1185 * not yet received a SYN, then we have no window to test
1186 * against, so fall back to checking that our SYN has been
1187 * ACKed.
1188 */
1189 if ( tcp->tcp_state & TCP_STATE_RCVD ( TCP_SYN ) ) {
1190 if ( ! tcp_in_window ( seq, tcp->rcv_ack, tcp->rcv_win ) )
1191 return 0;
1192 } else {
1193 if ( ! ( tcp->tcp_state & TCP_STATE_ACKED ( TCP_SYN ) ) )
1194 return 0;
1195 }
1196
1197 /* Abort connection */
1198 tcp->tcp_state = TCP_CLOSED;
1199 tcp_dump_state ( tcp );
1200 tcp_close ( tcp, -ECONNRESET );
1201
1202 DBGC ( tcp, "TCP %p connection reset by peer\n", tcp );
1203 return -ECONNRESET;
1204 }
1205
/**
 * Enqueue received TCP packet
 *
 * @v tcp		TCP connection
 * @v seq		SEQ value (in host-endian order)
 * @v flags		TCP flags
 * @v iobuf		I/O buffer
 *
 * Inserts the packet into the receive queue (kept sorted by sequence
 * number) unless it can be discarded immediately.  This function
 * takes ownership of the I/O buffer.
 */
static void tcp_rx_enqueue ( struct tcp_connection *tcp, uint32_t seq,
			     uint8_t flags, struct io_buffer *iobuf ) {
	struct tcp_rx_queued_header *tcpqhdr;
	struct io_buffer *queued;
	size_t len;
	uint32_t seq_len;
	uint32_t nxt;

	/* Calculate remaining flags and sequence length.  Note that
	 * SYN, if present, has already been processed by this point.
	 * Only FIN is retained; it occupies one unit of sequence space.
	 */
	flags &= TCP_FIN;
	len = iob_len ( iobuf );
	seq_len = ( len + ( flags ? 1 : 0 ) );
	nxt = ( seq + seq_len );

	/* Discard immediately (to save memory) if:
	 *
	 * a) we have not yet received a SYN (and so have no defined
	 *    receive window), or
	 * b) the packet lies entirely outside the receive window, or
	 * c) there is no further content to process.
	 */
	if ( ( ! ( tcp->tcp_state & TCP_STATE_RCVD ( TCP_SYN ) ) ) ||
	     ( tcp_cmp ( seq, tcp->rcv_ack + tcp->rcv_win ) >= 0 ) ||
	     ( tcp_cmp ( nxt, tcp->rcv_ack ) < 0 ) ||
	     ( seq_len == 0 ) ) {
		free_iob ( iobuf );
		return;
	}

	/* Add internal header (stripped again by the RX queue processor) */
	tcpqhdr = iob_push ( iobuf, sizeof ( *tcpqhdr ) );
	tcpqhdr->seq = seq;
	tcpqhdr->nxt = nxt;
	tcpqhdr->flags = flags;

	/* Add to RX queue, maintaining sequence-number order.  If no
	 * queued packet has a later sequence number, the loop runs to
	 * completion and &queued->list ends up pointing at the list
	 * head itself, so list_add_tail() appends at the end of the
	 * queue.
	 */
	list_for_each_entry ( queued, &tcp->rx_queue, list ) {
		tcpqhdr = queued->data;
		if ( tcp_cmp ( seq, tcpqhdr->seq ) < 0 )
			break;
	}
	list_add_tail ( &iobuf->list, &queued->list );
}
1259
1260 /**
1261 * Process receive queue
1262 *
1263 * @v tcp TCP connection
1264 */
1265 static void tcp_process_rx_queue ( struct tcp_connection *tcp ) {
1266 struct io_buffer *iobuf;
1267 struct tcp_rx_queued_header *tcpqhdr;
1268 uint32_t seq;
1269 unsigned int flags;
1270 size_t len;
1271
1272 /* Process all applicable received buffers. Note that we
1273 * cannot use list_for_each_entry() to iterate over the RX
1274 * queue, since tcp_discard() may remove packets from the RX
1275 * queue while we are processing.
1276 */
1277 while ( ( iobuf = list_first_entry ( &tcp->rx_queue, struct io_buffer,
1278 list ) ) ) {
1279
1280 /* Stop processing when we hit the first gap */
1281 tcpqhdr = iobuf->data;
1282 if ( tcp_cmp ( tcpqhdr->seq, tcp->rcv_ack ) > 0 )
1283 break;
1284
1285 /* Strip internal header and remove from RX queue */
1286 list_del ( &iobuf->list );
1287 seq = tcpqhdr->seq;
1288 flags = tcpqhdr->flags;
1289 iob_pull ( iobuf, sizeof ( *tcpqhdr ) );
1290 len = iob_len ( iobuf );
1291
1292 /* Handle new data, if any */
1293 tcp_rx_data ( tcp, seq, iob_disown ( iobuf ) );
1294 seq += len;
1295
1296 /* Handle FIN, if present */
1297 if ( flags & TCP_FIN ) {
1298 tcp_rx_fin ( tcp, seq );
1299 seq++;
1300 }
1301 }
1302 }
1303
/**
 * Process received packet
 *
 * @v iobuf		I/O buffer
 * @v netdev		Network device
 * @v st_src		Partially-filled source address
 * @v st_dest		Partially-filled destination address
 * @v pshdr_csum	Pseudo-header checksum
 * @ret rc		Return status code
 *
 * This function takes ownership of the I/O buffer.
 */
static int tcp_rx ( struct io_buffer *iobuf,
		    struct net_device *netdev __unused,
		    struct sockaddr_tcpip *st_src,
		    struct sockaddr_tcpip *st_dest __unused,
		    uint16_t pshdr_csum ) {
	struct tcp_header *tcphdr = iobuf->data;
	struct tcp_connection *tcp;
	struct tcp_options options;
	size_t hlen;
	uint16_t csum;
	uint32_t seq;
	uint32_t ack;
	uint16_t raw_win;
	uint32_t win;
	unsigned int flags;
	size_t len;
	uint32_t seq_len;
	size_t old_xfer_window;
	int rc;

	/* Start profiling */
	profile_start ( &tcp_rx_profiler );

	/* Sanity check packet */
	if ( iob_len ( iobuf ) < sizeof ( *tcphdr ) ) {
		DBG ( "TCP packet too short at %zd bytes (min %zd bytes)\n",
		      iob_len ( iobuf ), sizeof ( *tcphdr ) );
		rc = -EINVAL;
		goto discard;
	}
	/* Data offset is expressed in 32-bit words, in the upper
	 * nibble of the hlen field
	 */
	hlen = ( ( tcphdr->hlen & TCP_MASK_HLEN ) / 16 ) * 4;
	if ( hlen < sizeof ( *tcphdr ) ) {
		DBG ( "TCP header too short at %zd bytes (min %zd bytes)\n",
		      hlen, sizeof ( *tcphdr ) );
		rc = -EINVAL;
		goto discard;
	}
	if ( hlen > iob_len ( iobuf ) ) {
		DBG ( "TCP header too long at %zd bytes (max %zd bytes)\n",
		      hlen, iob_len ( iobuf ) );
		rc = -EINVAL;
		goto discard;
	}
	/* Continue the pseudo-header checksum over the whole segment;
	 * the result (which includes the checksum field itself) must
	 * come out to zero
	 */
	csum = tcpip_continue_chksum ( pshdr_csum, iobuf->data,
				       iob_len ( iobuf ) );
	if ( csum != 0 ) {
		DBG ( "TCP checksum incorrect (is %04x including checksum "
		      "field, should be 0000)\n", csum );
		rc = -EINVAL;
		goto discard;
	}

	/* Parse parameters from header and strip header.  Note that
	 * tcp may still be NULL at this point (handled below);
	 * tcp_rx_opts() and the DBGC macros tolerate that.
	 */
	tcp = tcp_demux ( ntohs ( tcphdr->dest ) );
	seq = ntohl ( tcphdr->seq );
	ack = ntohl ( tcphdr->ack );
	raw_win = ntohs ( tcphdr->win );
	flags = tcphdr->flags;
	tcp_rx_opts ( tcp, ( ( ( void * ) tcphdr ) + sizeof ( *tcphdr ) ),
		      ( hlen - sizeof ( *tcphdr ) ), &options );
	if ( tcp && options.tsopt )
		tcp->ts_val = ntohl ( options.tsopt->tsval );
	iob_pull ( iobuf, hlen );
	len = iob_len ( iobuf );
	/* SYN and FIN each occupy one unit of sequence space */
	seq_len = ( len + ( ( flags & TCP_SYN ) ? 1 : 0 ) +
		    ( ( flags & TCP_FIN ) ? 1 : 0 ) );

	/* Dump header */
	DBGC2 ( tcp, "TCP %p RX %d<-%d %08x %08x..%08x %4zd",
		tcp, ntohs ( tcphdr->dest ), ntohs ( tcphdr->src ),
		ntohl ( tcphdr->ack ), ntohl ( tcphdr->seq ),
		( ntohl ( tcphdr->seq ) + seq_len ), len );
	tcp_dump_flags ( tcp, tcphdr->flags );
	DBGC2 ( tcp, "\n" );

	/* If no connection was found, silently drop packet */
	if ( ! tcp ) {
		rc = -ENOTCONN;
		goto discard;
	}

	/* Record old data-transfer window */
	old_xfer_window = tcp_xfer_window ( tcp );

	/* Handle ACK, if present.  The advertised window is scaled by
	 * the window scale factor negotiated at connection setup.  A
	 * failure from tcp_rx_ack() indicates an unacceptable ACK, to
	 * which we respond with a RST.
	 */
	if ( flags & TCP_ACK ) {
		win = ( raw_win << tcp->snd_win_scale );
		if ( ( rc = tcp_rx_ack ( tcp, ack, win ) ) != 0 ) {
			tcp_xmit_reset ( tcp, st_src, tcphdr );
			goto discard;
		}
	}

	/* Force an ACK if this packet is out of order */
	if ( ( tcp->tcp_state & TCP_STATE_RCVD ( TCP_SYN ) ) &&
	     ( seq != tcp->rcv_ack ) ) {
		tcp->flags |= TCP_ACK_PENDING;
	}

	/* Handle SYN, if present */
	if ( flags & TCP_SYN ) {
		tcp_rx_syn ( tcp, seq, &options );
		seq++;
	}

	/* Handle RST, if present */
	if ( flags & TCP_RST ) {
		if ( ( rc = tcp_rx_rst ( tcp, seq ) ) != 0 )
			goto discard;
	}

	/* Enqueue received data */
	tcp_rx_enqueue ( tcp, seq, flags, iob_disown ( iobuf ) );

	/* Process receive queue */
	tcp_process_rx_queue ( tcp );

	/* Dump out any state change as a result of the received packet */
	tcp_dump_state ( tcp );

	/* Schedule transmission of ACK (and any pending data).  If we
	 * have received any out-of-order packets (i.e. if the receive
	 * queue remains non-empty after processing) then send the ACK
	 * immediately in order to trigger Fast Retransmission.
	 */
	if ( list_empty ( &tcp->rx_queue ) ) {
		process_add ( &tcp->process );
	} else {
		tcp_xmit_sack ( tcp, seq );
	}

	/* If this packet was the last we expect to receive, set up
	 * timer to expire and cause the connection to be freed.
	 */
	if ( TCP_CLOSED_GRACEFULLY ( tcp->tcp_state ) ) {
		stop_timer ( &tcp->wait );
		start_timer_fixed ( &tcp->wait, ( 2 * TCP_MSL ) );
	}

	/* Notify application if window has changed */
	if ( tcp_xfer_window ( tcp ) != old_xfer_window )
		xfer_window_changed ( &tcp->xfer );

	profile_stop ( &tcp_rx_profiler );
	return 0;

 discard:
	/* Free received packet */
	free_iob ( iobuf );
	return rc;
}
1465
/** TCP protocol
 *
 * Registers TCP with the TCP/IP stack: tcp_rx() is invoked for
 * received packets carrying IP protocol number IP_TCP.
 */
struct tcpip_protocol tcp_protocol __tcpip_protocol = {
	.name = "TCP",
	.rx = tcp_rx,
	.tcpip_proto = IP_TCP,
};
1472
1473 /**
1474 * Discard some cached TCP data
1475 *
1476 * @ret discarded Number of cached items discarded
1477 */
1478 static unsigned int tcp_discard ( void ) {
1479 struct tcp_connection *tcp;
1480 struct io_buffer *iobuf;
1481 unsigned int discarded = 0;
1482
1483 /* Try to drop one queued RX packet from each connection */
1484 list_for_each_entry ( tcp, &tcp_conns, list ) {
1485 list_for_each_entry_reverse ( iobuf, &tcp->rx_queue, list ) {
1486
1487 /* Remove packet from queue */
1488 list_del ( &iobuf->list );
1489 free_iob ( iobuf );
1490
1491 /* Report discard */
1492 discarded++;
1493 break;
1494 }
1495 }
1496
1497 return discarded;
1498 }
1499
/** TCP cache discarder
 *
 * Lets the cache-discard mechanism reclaim memory under pressure by
 * calling tcp_discard() to drop queued received packets.
 */
struct cache_discarder tcp_discarder __cache_discarder ( CACHE_NORMAL ) = {
	.discard = tcp_discard,
};
1504
1505 /**
1506 * Find first TCP connection that has not yet been closed
1507 *
1508 * @ret tcp First unclosed connection, or NULL
1509 */
1510 static struct tcp_connection * tcp_first_unclosed ( void ) {
1511 struct tcp_connection *tcp;
1512
1513 /* Find first connection which has not yet been closed */
1514 list_for_each_entry ( tcp, &tcp_conns, list ) {
1515 if ( ! ( tcp->flags & TCP_XFER_CLOSED ) )
1516 return tcp;
1517 }
1518 return NULL;
1519 }
1520
1521 /**
1522 * Find first TCP connection that has not yet finished all operations
1523 *
1524 * @ret tcp First unfinished connection, or NULL
1525 */
1526 static struct tcp_connection * tcp_first_unfinished ( void ) {
1527 struct tcp_connection *tcp;
1528
1529 /* Find first connection which has not yet closed gracefully,
1530 * or which still has a pending transmission (e.g. to ACK the
1531 * received FIN).
1532 */
1533 list_for_each_entry ( tcp, &tcp_conns, list ) {
1534 if ( ( ! TCP_CLOSED_GRACEFULLY ( tcp->tcp_state ) ) ||
1535 process_running ( &tcp->process ) ) {
1536 return tcp;
1537 }
1538 }
1539 return NULL;
1540 }
1541
1542 /**
1543 * Shut down all TCP connections
1544 *
1545 */
1546 static void tcp_shutdown ( int booting __unused ) {
1547 struct tcp_connection *tcp;
1548 unsigned long start;
1549 unsigned long elapsed;
1550
1551 /* Initiate a graceful close of all connections, allowing for
1552 * the fact that the connection list may change as we do so.
1553 */
1554 while ( ( tcp = tcp_first_unclosed() ) ) {
1555 DBGC ( tcp, "TCP %p closing for shutdown\n", tcp );
1556 tcp_close ( tcp, -ECANCELED );
1557 }
1558
1559 /* Wait for all connections to finish closing gracefully */
1560 start = currticks();
1561 while ( ( tcp = tcp_first_unfinished() ) &&
1562 ( ( elapsed = ( currticks() - start ) ) < TCP_FINISH_TIMEOUT )){
1563 step();
1564 }
1565
1566 /* Forcibly close any remaining connections */
1567 while ( ( tcp = list_first_entry ( &tcp_conns, struct tcp_connection,
1568 list ) ) != NULL ) {
1569 tcp->tcp_state = TCP_CLOSED;
1570 tcp_dump_state ( tcp );
1571 tcp_close ( tcp, -ECANCELED );
1572 }
1573 }
1574
/** TCP shutdown function
 *
 * Runs tcp_shutdown() during the late stage of shutdown, closing all
 * TCP connections before the system hands over control.
 */
struct startup_fn tcp_startup_fn __startup_fn ( STARTUP_LATE ) = {
	.shutdown = tcp_shutdown,
};
1579
1580 /***************************************************************************
1581 *
1582 * Data transfer interface
1583 *
1584 ***************************************************************************
1585 */
1586
/**
 * Close interface
 *
 * @v tcp		TCP connection
 * @v rc		Reason for close
 *
 * Invoked when the data transfer interface is closed.  Closes the
 * connection and then kicks the transmitter so that any resulting
 * FIN can go out straight away.
 */
static void tcp_xfer_close ( struct tcp_connection *tcp, int rc ) {

	/* Close data transfer interface */
	tcp_close ( tcp, rc );

	/* Transmit FIN, if possible */
	tcp_xmit ( tcp );
}
1601
/**
 * Deliver datagram as I/O buffer
 *
 * @v tcp		TCP connection
 * @v iobuf		Datagram I/O buffer
 * @v meta		Data transfer metadata
 * @ret rc		Return status code
 *
 * This function takes ownership of the I/O buffer: it is queued for
 * transmission and freed once acknowledged by the peer.
 */
static int tcp_xfer_deliver ( struct tcp_connection *tcp,
			      struct io_buffer *iobuf,
			      struct xfer_metadata *meta __unused ) {

	/* Enqueue packet */
	list_add_tail ( &iobuf->list, &tcp->tx_queue );

	/* Each enqueued packet is a pending operation */
	pending_get ( &tcp->pending_data );

	/* Transmit data, if possible */
	tcp_xmit ( tcp );

	return 0;
}
1625
/** TCP data transfer interface operations
 *
 * Maps data transfer interface methods to their TCP implementations.
 */
static struct interface_operation tcp_xfer_operations[] = {
	INTF_OP ( xfer_deliver, struct tcp_connection *, tcp_xfer_deliver ),
	INTF_OP ( xfer_window, struct tcp_connection *, tcp_xfer_window ),
	INTF_OP ( intf_close, struct tcp_connection *, tcp_xfer_close ),
};
1632
/** TCP data transfer interface descriptor
 *
 * Binds the operations table to the xfer member of struct
 * tcp_connection.
 */
static struct interface_descriptor tcp_xfer_desc =
	INTF_DESC ( struct tcp_connection, xfer, tcp_xfer_operations );
1636
1637 /***************************************************************************
1638 *
1639 * Openers
1640 *
1641 ***************************************************************************
1642 */
1643
/** TCP IPv4 socket opener
 *
 * Registers tcp_open() as the handler for stream sockets over IPv4.
 */
struct socket_opener tcp_ipv4_socket_opener __socket_opener = {
	.semantics	= TCP_SOCK_STREAM,
	.family		= AF_INET,
	.open		= tcp_open,
};
1650
/** TCP IPv6 socket opener
 *
 * Registers tcp_open() as the handler for stream sockets over IPv6.
 */
struct socket_opener tcp_ipv6_socket_opener __socket_opener = {
	.semantics	= TCP_SOCK_STREAM,
	.family		= AF_INET6,
	.open		= tcp_open,
};
1657
/** Linkage hack
 *
 * NOTE(review): presumably referencing this symbol (rather than the
 * TCP_SOCK_STREAM constant directly) forces this object, and hence
 * TCP support, to be included at link time — confirm against callers.
 */
int tcp_sock_stream = TCP_SOCK_STREAM;
1660
1661 /**
1662 * Open TCP URI
1663 *
1664 * @v xfer Data transfer interface
1665 * @v uri URI
1666 * @ret rc Return status code
1667 */
1668 static int tcp_open_uri ( struct interface *xfer, struct uri *uri ) {
1669 struct sockaddr_tcpip peer;
1670
1671 /* Sanity check */
1672 if ( ! uri->host )
1673 return -EINVAL;
1674
1675 memset ( &peer, 0, sizeof ( peer ) );
1676 peer.st_port = htons ( uri_port ( uri, 0 ) );
1677 return xfer_open_named_socket ( xfer, SOCK_STREAM,
1678 ( struct sockaddr * ) &peer,
1679 uri->host, NULL );
1680 }
1681
/** TCP URI opener
 *
 * Handles "tcp:" URIs via tcp_open_uri().
 */
struct uri_opener tcp_uri_opener __uri_opener = {
	.scheme	= "tcp",
	.open	= tcp_open_uri,
};
1687