[prefix] Use garbage-collectable section names
[ipxe.git] / src / net / tcp.c
1 #include <string.h>
2 #include <stdlib.h>
3 #include <stdio.h>
4 #include <assert.h>
5 #include <errno.h>
6 #include <byteswap.h>
7 #include <ipxe/timer.h>
8 #include <ipxe/iobuf.h>
9 #include <ipxe/malloc.h>
10 #include <ipxe/init.h>
11 #include <ipxe/retry.h>
12 #include <ipxe/refcnt.h>
13 #include <ipxe/pending.h>
14 #include <ipxe/xfer.h>
15 #include <ipxe/open.h>
16 #include <ipxe/uri.h>
17 #include <ipxe/netdevice.h>
18 #include <ipxe/profile.h>
19 #include <ipxe/process.h>
20 #include <ipxe/tcpip.h>
21 #include <ipxe/tcp.h>
22
23 /** @file
24 *
25 * TCP protocol
26 *
27 */
28
29 FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
30
/** A TCP connection */
struct tcp_connection {
	/** Reference counter */
	struct refcnt refcnt;
	/** List of TCP connections
	 *
	 * Entry in the global @c tcp_conns list.
	 */
	struct list_head list;

	/** Flags
	 *
	 * A bitwise OR of values from enum tcp_flags.
	 */
	unsigned int flags;

	/** Data transfer interface */
	struct interface xfer;

	/** Remote socket address */
	struct sockaddr_tcpip peer;
	/** Local port */
	unsigned int local_port;
	/** Maximum segment size */
	size_t mss;

	/** Current TCP state */
	unsigned int tcp_state;
	/** Previous TCP state
	 *
	 * Maintained only for debug messages
	 */
	unsigned int prev_tcp_state;
	/** Current sequence number
	 *
	 * Equivalent to SND.UNA in RFC 793 terminology.
	 */
	uint32_t snd_seq;
	/** Unacknowledged sequence count
	 *
	 * Equivalent to (SND.NXT-SND.UNA) in RFC 793 terminology.
	 */
	uint32_t snd_sent;
	/** Send window
	 *
	 * Equivalent to SND.WND in RFC 793 terminology
	 */
	uint32_t snd_win;
	/** Current acknowledgement number
	 *
	 * Equivalent to RCV.NXT in RFC 793 terminology.
	 */
	uint32_t rcv_ack;
	/** Receive window
	 *
	 * Equivalent to RCV.WND in RFC 793 terminology.
	 */
	uint32_t rcv_win;
	/** Received timestamp value
	 *
	 * Updated when a packet is received; copied to ts_recent when
	 * the window is advanced.
	 */
	uint32_t ts_val;
	/** Most recent received timestamp that advanced the window
	 *
	 * Equivalent to TS.Recent in RFC 1323 terminology.
	 */
	uint32_t ts_recent;
	/** Send window scale
	 *
	 * Equivalent to Snd.Wind.Scale in RFC 1323 terminology
	 */
	uint8_t snd_win_scale;
	/** Receive window scale
	 *
	 * Equivalent to Rcv.Wind.Scale in RFC 1323 terminology
	 */
	uint8_t rcv_win_scale;

	/** Selective acknowledgement list (in host-endian order) */
	struct tcp_sack_block sack[TCP_SACK_MAX];

	/** Transmit queue of unacknowledged outbound data */
	struct list_head tx_queue;
	/** Receive queue of out-of-order inbound data */
	struct list_head rx_queue;
	/** Transmission process */
	struct process process;
	/** Retransmission timer */
	struct retry_timer timer;
	/** Shutdown (TIME_WAIT) timer */
	struct retry_timer wait;

	/** Pending operations for SYN and FIN */
	struct pending_operation pending_flags;
	/** Pending operations for transmit queue */
	struct pending_operation pending_data;
};
124
/** TCP flags
 *
 * These bits are held in the @c flags field of struct tcp_connection.
 */
enum tcp_flags {
	/** TCP data transfer interface has been closed */
	TCP_XFER_CLOSED = 0x0001,
	/** TCP timestamps are enabled */
	TCP_TS_ENABLED = 0x0002,
	/** TCP acknowledgement is pending */
	TCP_ACK_PENDING = 0x0004,
	/** TCP selective acknowledgement is enabled */
	TCP_SACK_ENABLED = 0x0008,
};
136
/** TCP internal header
 *
 * This is the header that replaces the TCP header for packets
 * enqueued on the receive queue.
 */
struct tcp_rx_queued_header {
	/** SEQ value, in host-endian order
	 *
	 * This represents the SEQ value at the time the packet is
	 * enqueued, and so excludes the SYN, if present.
	 */
	uint32_t seq;
	/** Next SEQ value, in host-endian order
	 *
	 * Used as the right edge of this packet's sequence range when
	 * constructing SACK blocks.
	 */
	uint32_t nxt;
	/** Flags
	 *
	 * Only FIN is valid within this flags byte; all other flags
	 * have already been processed by the time the packet is
	 * enqueued.
	 */
	uint8_t flags;
	/** Reserved (padding to a 12-byte, dword-aligned header) */
	uint8_t reserved[3];
};
161
/**
 * List of registered TCP connections
 *
 * Entries are struct tcp_connection, linked via their @c list field.
 */
static LIST_HEAD ( tcp_conns );

/** Transmit profiler */
static struct profiler tcp_tx_profiler __profiler = { .name = "tcp.tx" };

/** Receive profiler */
static struct profiler tcp_rx_profiler __profiler = { .name = "tcp.rx" };

/** Data transfer profiler */
static struct profiler tcp_xfer_profiler __profiler = { .name = "tcp.xfer" };

/* Forward declarations */
static struct process_descriptor tcp_process_desc;
static struct interface_descriptor tcp_xfer_desc;
static void tcp_expired ( struct retry_timer *timer, int over );
static void tcp_wait_expired ( struct retry_timer *timer, int over );
static struct tcp_connection * tcp_demux ( unsigned int local_port );
static int tcp_rx_ack ( struct tcp_connection *tcp, uint32_t ack,
			uint32_t win );
185 /**
186 * Name TCP state
187 *
188 * @v state TCP state
189 * @ret name Name of TCP state
190 */
191 static inline __attribute__ (( always_inline )) const char *
192 tcp_state ( int state ) {
193 switch ( state ) {
194 case TCP_CLOSED: return "CLOSED";
195 case TCP_LISTEN: return "LISTEN";
196 case TCP_SYN_SENT: return "SYN_SENT";
197 case TCP_SYN_RCVD: return "SYN_RCVD";
198 case TCP_ESTABLISHED: return "ESTABLISHED";
199 case TCP_FIN_WAIT_1: return "FIN_WAIT_1";
200 case TCP_FIN_WAIT_2: return "FIN_WAIT_2";
201 case TCP_CLOSING_OR_LAST_ACK: return "CLOSING/LAST_ACK";
202 case TCP_TIME_WAIT: return "TIME_WAIT";
203 case TCP_CLOSE_WAIT: return "CLOSE_WAIT";
204 default: return "INVALID";
205 }
206 }
207
208 /**
209 * Dump TCP state transition
210 *
211 * @v tcp TCP connection
212 */
213 static inline __attribute__ (( always_inline )) void
214 tcp_dump_state ( struct tcp_connection *tcp ) {
215
216 if ( tcp->tcp_state != tcp->prev_tcp_state ) {
217 DBGC ( tcp, "TCP %p transitioned from %s to %s\n", tcp,
218 tcp_state ( tcp->prev_tcp_state ),
219 tcp_state ( tcp->tcp_state ) );
220 }
221 tcp->prev_tcp_state = tcp->tcp_state;
222 }
223
224 /**
225 * Dump TCP flags
226 *
227 * @v flags TCP flags
228 */
229 static inline __attribute__ (( always_inline )) void
230 tcp_dump_flags ( struct tcp_connection *tcp, unsigned int flags ) {
231 if ( flags & TCP_RST )
232 DBGC2 ( tcp, " RST" );
233 if ( flags & TCP_SYN )
234 DBGC2 ( tcp, " SYN" );
235 if ( flags & TCP_PSH )
236 DBGC2 ( tcp, " PSH" );
237 if ( flags & TCP_FIN )
238 DBGC2 ( tcp, " FIN" );
239 if ( flags & TCP_ACK )
240 DBGC2 ( tcp, " ACK" );
241 }
242
243 /***************************************************************************
244 *
245 * Open and close
246 *
247 ***************************************************************************
248 */
249
250 /**
251 * Check if local TCP port is available
252 *
253 * @v port Local port number
254 * @ret port Local port number, or negative error
255 */
256 static int tcp_port_available ( int port ) {
257
258 return ( tcp_demux ( port ) ? -EADDRINUSE : port );
259 }
260
/**
 * Open a TCP connection
 *
 * @v xfer		Data transfer interface
 * @v peer		Peer socket address
 * @v local		Local socket address, or NULL
 * @ret rc		Return status code
 *
 * Allocates and initialises a connection, binds it to a local port,
 * and starts the retransmission timer to trigger transmission of the
 * initial SYN.  On success the single allocation reference is
 * transferred to the global connection list; on failure it is dropped,
 * freeing the connection.
 */
static int tcp_open ( struct interface *xfer, struct sockaddr *peer,
		      struct sockaddr *local ) {
	struct sockaddr_tcpip *st_peer = ( struct sockaddr_tcpip * ) peer;
	struct sockaddr_tcpip *st_local = ( struct sockaddr_tcpip * ) local;
	struct tcp_connection *tcp;
	size_t mtu;
	int port;
	int rc;

	/* Allocate and initialise structure */
	tcp = zalloc ( sizeof ( *tcp ) );
	if ( ! tcp )
		return -ENOMEM;
	DBGC ( tcp, "TCP %p allocated\n", tcp );
	ref_init ( &tcp->refcnt, NULL );
	intf_init ( &tcp->xfer, &tcp_xfer_desc, &tcp->refcnt );
	process_init_stopped ( &tcp->process, &tcp_process_desc, &tcp->refcnt );
	timer_init ( &tcp->timer, tcp_expired, &tcp->refcnt );
	timer_init ( &tcp->wait, tcp_wait_expired, &tcp->refcnt );
	tcp->prev_tcp_state = TCP_CLOSED;
	tcp->tcp_state = TCP_STATE_SENT ( TCP_SYN );
	tcp_dump_state ( tcp );
	/* NOTE(review): random() is not cryptographically strong, so
	 * the initial sequence number is predictable.
	 */
	tcp->snd_seq = random();
	INIT_LIST_HEAD ( &tcp->tx_queue );
	INIT_LIST_HEAD ( &tcp->rx_queue );
	memcpy ( &tcp->peer, st_peer, sizeof ( tcp->peer ) );

	/* Calculate MSS from the path MTU, less the basic TCP header */
	mtu = tcpip_mtu ( &tcp->peer );
	if ( ! mtu ) {
		DBGC ( tcp, "TCP %p has no route to %s\n",
		       tcp, sock_ntoa ( peer ) );
		rc = -ENETUNREACH;
		goto err;
	}
	tcp->mss = ( mtu - sizeof ( struct tcp_header ) );

	/* Bind to local port (tcpip_bind picks an ephemeral port when
	 * no explicit local address is given)
	 */
	port = tcpip_bind ( st_local, tcp_port_available );
	if ( port < 0 ) {
		rc = port;
		DBGC ( tcp, "TCP %p could not bind: %s\n",
		       tcp, strerror ( rc ) );
		goto err;
	}
	tcp->local_port = port;
	DBGC ( tcp, "TCP %p bound to port %d\n", tcp, tcp->local_port );

	/* Start timer to initiate SYN */
	start_timer_nodelay ( &tcp->timer );

	/* Add a pending operation for the SYN */
	pending_get ( &tcp->pending_flags );

	/* Attach parent interface, transfer reference to connection
	 * list and return
	 */
	intf_plug_plug ( &tcp->xfer, xfer );
	list_add ( &tcp->list, &tcp_conns );
	return 0;

 err:
	/* Drop the sole reference; this frees the connection */
	ref_put ( &tcp->refcnt );
	return rc;
}
334
/**
 * Close TCP connection
 *
 * @v tcp		TCP connection
 * @v rc		Reason for close
 *
 * Closes the data transfer interface.  If the TCP state machine is in
 * a suitable state, the connection will be deleted.
 */
static void tcp_close ( struct tcp_connection *tcp, int rc ) {
	struct io_buffer *iobuf;
	struct io_buffer *tmp;

	/* Close data transfer interface */
	intf_shutdown ( &tcp->xfer, rc );
	tcp->flags |= TCP_XFER_CLOSED;

	/* If we are in CLOSED, or have otherwise not yet received a
	 * SYN (i.e. we are in LISTEN or SYN_SENT), just delete the
	 * connection.
	 */
	if ( ! ( tcp->tcp_state & TCP_STATE_RCVD ( TCP_SYN ) ) ) {

		/* Transition to CLOSED for the sake of debugging messages */
		tcp->tcp_state = TCP_CLOSED;
		tcp_dump_state ( tcp );

		/* Free any unprocessed I/O buffers */
		list_for_each_entry_safe ( iobuf, tmp, &tcp->rx_queue, list ) {
			list_del ( &iobuf->list );
			free_iob ( iobuf );
		}

		/* Free any unsent I/O buffers (each of which holds one
		 * pending-data reference, released as it is freed)
		 */
		list_for_each_entry_safe ( iobuf, tmp, &tcp->tx_queue, list ) {
			list_del ( &iobuf->list );
			free_iob ( iobuf );
			pending_put ( &tcp->pending_data );
		}
		assert ( ! is_pending ( &tcp->pending_data ) );

		/* Remove pending operations for SYN and FIN, if applicable */
		pending_put ( &tcp->pending_flags );
		pending_put ( &tcp->pending_flags );

		/* Remove from list and drop reference */
		process_del ( &tcp->process );
		stop_timer ( &tcp->timer );
		stop_timer ( &tcp->wait );
		list_del ( &tcp->list );
		ref_put ( &tcp->refcnt );
		DBGC ( tcp, "TCP %p connection deleted\n", tcp );
		return;
	}

	/* If we have not had our SYN acknowledged (i.e. we are in
	 * SYN_RCVD), pretend that it has been acknowledged so that we
	 * can send a FIN without breaking things.
	 */
	if ( ! ( tcp->tcp_state & TCP_STATE_ACKED ( TCP_SYN ) ) )
		tcp_rx_ack ( tcp, ( tcp->snd_seq + 1 ), 0 );

	/* If we have no data remaining to send, start sending FIN */
	if ( list_empty ( &tcp->tx_queue ) &&
	     ! ( tcp->tcp_state & TCP_STATE_SENT ( TCP_FIN ) ) ) {

		/* Wake the transmit process to send the FIN */
		tcp->tcp_state |= TCP_STATE_SENT ( TCP_FIN );
		tcp_dump_state ( tcp );
		process_add ( &tcp->process );

		/* Add a pending operation for the FIN */
		pending_get ( &tcp->pending_flags );
	}
}
409
410 /***************************************************************************
411 *
412 * Transmit data path
413 *
414 ***************************************************************************
415 */
416
417 /**
418 * Calculate transmission window
419 *
420 * @v tcp TCP connection
421 * @ret len Maximum length that can be sent in a single packet
422 */
423 static size_t tcp_xmit_win ( struct tcp_connection *tcp ) {
424 size_t len;
425
426 /* Not ready if we're not in a suitable connection state */
427 if ( ! TCP_CAN_SEND_DATA ( tcp->tcp_state ) )
428 return 0;
429
430 /* Length is the minimum of the receiver's window and the path MTU */
431 len = tcp->snd_win;
432 if ( len > TCP_PATH_MTU )
433 len = TCP_PATH_MTU;
434
435 return len;
436 }
437
438 /**
439 * Check data-transfer flow control window
440 *
441 * @v tcp TCP connection
442 * @ret len Length of window
443 */
444 static size_t tcp_xfer_window ( struct tcp_connection *tcp ) {
445
446 /* Not ready if data queue is non-empty. This imposes a limit
447 * of only one unACKed packet in the TX queue at any time; we
448 * do this to conserve memory usage.
449 */
450 if ( ! list_empty ( &tcp->tx_queue ) )
451 return 0;
452
453 /* Return TCP window length */
454 return tcp_xmit_win ( tcp );
455 }
456
/**
 * Find selective acknowledgement block
 *
 * @v tcp		TCP connection
 * @v seq		SEQ value in SACK block (in host-endian order)
 * @v sack		SACK block to fill in (in host-endian order)
 * @ret len		Length of SACK block
 *
 * Scans the receive queue for the contiguous run of queued data that
 * contains @c seq, and returns its extent as a SACK block.  Returns
 * zero (leaving @c sack unmodified) if no queued block contains
 * @c seq.  All sequence comparisons use tcp_cmp() and so are safe
 * across 32-bit sequence number wraparound.
 */
static uint32_t tcp_sack_block ( struct tcp_connection *tcp, uint32_t seq,
				 struct tcp_sack_block *sack ) {
	struct io_buffer *iobuf;
	struct tcp_rx_queued_header *tcpqhdr;
	uint32_t left = tcp->rcv_ack;
	uint32_t right = left;

	/* Find highest block which does not start after SEQ.  The
	 * receive queue is held in SEQ order, so each gap between
	 * queued packets starts a new candidate block.
	 */
	list_for_each_entry ( iobuf, &tcp->rx_queue, list ) {
		tcpqhdr = iobuf->data;
		if ( tcp_cmp ( tcpqhdr->seq, right ) > 0 ) {
			if ( tcp_cmp ( tcpqhdr->seq, seq ) > 0 )
				break;
			left = tcpqhdr->seq;
		}
		if ( tcp_cmp ( tcpqhdr->nxt, right ) > 0 )
			right = tcpqhdr->nxt;
	}

	/* Fail if this block does not contain SEQ */
	if ( tcp_cmp ( right, seq ) < 0 )
		return 0;

	/* Populate SACK block */
	sack->left = left;
	sack->right = right;
	return ( right - left );
}
493
/**
 * Update TCP selective acknowledgement list
 *
 * @v tcp		TCP connection
 * @v seq		SEQ value in first SACK block (in host-endian order)
 * @ret count		Number of SACK blocks
 *
 * Rebuilds the connection's SACK list from the current receive queue:
 * the block containing @c seq (if any) comes first, followed by any
 * previously-recorded blocks that still correspond to queued data,
 * with duplicates removed.
 */
static unsigned int tcp_sack ( struct tcp_connection *tcp, uint32_t seq ) {
	struct tcp_sack_block sack[TCP_SACK_MAX];
	unsigned int old = 0;
	unsigned int new = 0;
	unsigned int i;
	uint32_t len;

	/* Populate first new SACK block */
	len = tcp_sack_block ( tcp, seq, &sack[0] );
	if ( len )
		new++;

	/* Populate remaining new SACK blocks based on old SACK blocks */
	for ( old = 0 ; old < TCP_SACK_MAX ; old++ ) {

		/* Stop if we run out of space in the new list */
		if ( new == TCP_SACK_MAX )
			break;

		/* Skip empty old SACK blocks */
		if ( tcp->sack[old].left == tcp->sack[old].right )
			continue;

		/* Populate new SACK block */
		len = tcp_sack_block ( tcp, tcp->sack[old].left, &sack[new] );
		if ( len == 0 )
			continue;

		/* Eliminate duplicates: if the freshly-populated entry
		 * matches an earlier one, decrement first so that the
		 * unconditional new++ below cancels out.
		 */
		for ( i = 0 ; i < new ; i++ ) {
			if ( sack[i].left == sack[new].left ) {
				new--;
				break;
			}
		}
		new++;
	}

	/* Update SACK list (zero-fill so unused slots read as empty) */
	memset ( tcp->sack, 0, sizeof ( tcp->sack ) );
	memcpy ( tcp->sack, sack, ( new * sizeof ( tcp->sack[0] ) ) );
	return new;
}
544
/**
 * Process TCP transmit queue
 *
 * @v tcp		TCP connection
 * @v max_len		Maximum length to process
 * @v dest		I/O buffer to fill with data, or NULL
 * @v remove		Remove data from queue
 * @ret len		Length of data processed
 *
 * This processes at most @c max_len bytes from the TCP connection's
 * transmit queue.  Data will be copied into the @c dest I/O buffer
 * (if provided) and, if @c remove is true, removed from the transmit
 * queue.
 */
static size_t tcp_process_tx_queue ( struct tcp_connection *tcp, size_t max_len,
				     struct io_buffer *dest, int remove ) {
	struct io_buffer *iobuf;
	struct io_buffer *tmp;
	size_t frag_len;
	size_t len = 0;

	/* Once max_len reaches zero, remaining iterations process
	 * zero-length fragments and so become no-ops.
	 */
	list_for_each_entry_safe ( iobuf, tmp, &tcp->tx_queue, list ) {
		frag_len = iob_len ( iobuf );
		if ( frag_len > max_len )
			frag_len = max_len;
		if ( dest ) {
			memcpy ( iob_put ( dest, frag_len ), iobuf->data,
				 frag_len );
		}
		if ( remove ) {
			/* Consume the fragment; free the buffer (and
			 * release its pending-data reference) only once
			 * it is completely empty.
			 */
			iob_pull ( iobuf, frag_len );
			if ( ! iob_len ( iobuf ) ) {
				list_del ( &iobuf->list );
				free_iob ( iobuf );
				pending_put ( &tcp->pending_data );
			}
		}
		len += frag_len;
		max_len -= frag_len;
	}
	return len;
}
587
588 /**
589 * Transmit any outstanding data (with selective acknowledgement)
590 *
591 * @v tcp TCP connection
592 * @v sack_seq SEQ for first selective acknowledgement (if any)
593 *
594 * Transmits any outstanding data on the connection.
595 *
596 * Note that even if an error is returned, the retransmission timer
597 * will have been started if necessary, and so the stack will
598 * eventually attempt to retransmit the failed packet.
599 */
600 static void tcp_xmit_sack ( struct tcp_connection *tcp, uint32_t sack_seq ) {
601 struct io_buffer *iobuf;
602 struct tcp_header *tcphdr;
603 struct tcp_mss_option *mssopt;
604 struct tcp_window_scale_padded_option *wsopt;
605 struct tcp_timestamp_padded_option *tsopt;
606 struct tcp_sack_permitted_padded_option *spopt;
607 struct tcp_sack_padded_option *sackopt;
608 struct tcp_sack_block *sack;
609 void *payload;
610 unsigned int flags;
611 unsigned int sack_count;
612 unsigned int i;
613 size_t len = 0;
614 size_t sack_len;
615 uint32_t seq_len;
616 uint32_t max_rcv_win;
617 uint32_t max_representable_win;
618 int rc;
619
620 /* Start profiling */
621 profile_start ( &tcp_tx_profiler );
622
623 /* If retransmission timer is already running, do nothing */
624 if ( timer_running ( &tcp->timer ) )
625 return;
626
627 /* Calculate both the actual (payload) and sequence space
628 * lengths that we wish to transmit.
629 */
630 if ( TCP_CAN_SEND_DATA ( tcp->tcp_state ) ) {
631 len = tcp_process_tx_queue ( tcp, tcp_xmit_win ( tcp ),
632 NULL, 0 );
633 }
634 seq_len = len;
635 flags = TCP_FLAGS_SENDING ( tcp->tcp_state );
636 if ( flags & ( TCP_SYN | TCP_FIN ) ) {
637 /* SYN or FIN consume one byte, and we can never send both */
638 assert ( ! ( ( flags & TCP_SYN ) && ( flags & TCP_FIN ) ) );
639 seq_len++;
640 }
641 tcp->snd_sent = seq_len;
642
643 /* If we have nothing to transmit, stop now */
644 if ( ( seq_len == 0 ) && ! ( tcp->flags & TCP_ACK_PENDING ) )
645 return;
646
647 /* If we are transmitting anything that requires
648 * acknowledgement (i.e. consumes sequence space), start the
649 * retransmission timer. Do this before attempting to
650 * allocate the I/O buffer, in case allocation itself fails.
651 */
652 if ( seq_len )
653 start_timer ( &tcp->timer );
654
655 /* Allocate I/O buffer */
656 iobuf = alloc_iob ( len + TCP_MAX_HEADER_LEN );
657 if ( ! iobuf ) {
658 DBGC ( tcp, "TCP %p could not allocate iobuf for %08x..%08x "
659 "%08x\n", tcp, tcp->snd_seq, ( tcp->snd_seq + seq_len ),
660 tcp->rcv_ack );
661 return;
662 }
663 iob_reserve ( iobuf, TCP_MAX_HEADER_LEN );
664
665 /* Fill data payload from transmit queue */
666 tcp_process_tx_queue ( tcp, len, iobuf, 0 );
667
668 /* Expand receive window if possible */
669 max_rcv_win = xfer_window ( &tcp->xfer );
670 if ( max_rcv_win > TCP_MAX_WINDOW_SIZE )
671 max_rcv_win = TCP_MAX_WINDOW_SIZE;
672 max_representable_win = ( 0xffff << tcp->rcv_win_scale );
673 if ( max_rcv_win > max_representable_win )
674 max_rcv_win = max_representable_win;
675 max_rcv_win &= ~0x03; /* Keep everything dword-aligned */
676 if ( tcp->rcv_win < max_rcv_win )
677 tcp->rcv_win = max_rcv_win;
678
679 /* Fill up the TCP header */
680 payload = iobuf->data;
681 if ( flags & TCP_SYN ) {
682 mssopt = iob_push ( iobuf, sizeof ( *mssopt ) );
683 mssopt->kind = TCP_OPTION_MSS;
684 mssopt->length = sizeof ( *mssopt );
685 mssopt->mss = htons ( tcp->mss );
686 wsopt = iob_push ( iobuf, sizeof ( *wsopt ) );
687 wsopt->nop = TCP_OPTION_NOP;
688 wsopt->wsopt.kind = TCP_OPTION_WS;
689 wsopt->wsopt.length = sizeof ( wsopt->wsopt );
690 wsopt->wsopt.scale = TCP_RX_WINDOW_SCALE;
691 spopt = iob_push ( iobuf, sizeof ( *spopt ) );
692 memset ( spopt->nop, TCP_OPTION_NOP, sizeof ( spopt ) );
693 spopt->spopt.kind = TCP_OPTION_SACK_PERMITTED;
694 spopt->spopt.length = sizeof ( spopt->spopt );
695 }
696 if ( ( flags & TCP_SYN ) || ( tcp->flags & TCP_TS_ENABLED ) ) {
697 tsopt = iob_push ( iobuf, sizeof ( *tsopt ) );
698 memset ( tsopt->nop, TCP_OPTION_NOP, sizeof ( tsopt->nop ) );
699 tsopt->tsopt.kind = TCP_OPTION_TS;
700 tsopt->tsopt.length = sizeof ( tsopt->tsopt );
701 tsopt->tsopt.tsval = htonl ( currticks() );
702 tsopt->tsopt.tsecr = htonl ( tcp->ts_recent );
703 }
704 if ( ( tcp->flags & TCP_SACK_ENABLED ) &&
705 ( ! list_empty ( &tcp->rx_queue ) ) &&
706 ( ( sack_count = tcp_sack ( tcp, sack_seq ) ) != 0 ) ) {
707 sack_len = ( sack_count * sizeof ( *sack ) );
708 sackopt = iob_push ( iobuf, ( sizeof ( *sackopt ) + sack_len ));
709 memset ( sackopt->nop, TCP_OPTION_NOP, sizeof ( sackopt->nop ));
710 sackopt->sackopt.kind = TCP_OPTION_SACK;
711 sackopt->sackopt.length =
712 ( sizeof ( sackopt->sackopt ) + sack_len );
713 sack = ( ( ( void * ) sackopt ) + sizeof ( *sackopt ) );
714 for ( i = 0 ; i < sack_count ; i++, sack++ ) {
715 sack->left = htonl ( tcp->sack[i].left );
716 sack->right = htonl ( tcp->sack[i].right );
717 }
718 }
719 if ( len != 0 )
720 flags |= TCP_PSH;
721 tcphdr = iob_push ( iobuf, sizeof ( *tcphdr ) );
722 memset ( tcphdr, 0, sizeof ( *tcphdr ) );
723 tcphdr->src = htons ( tcp->local_port );
724 tcphdr->dest = tcp->peer.st_port;
725 tcphdr->seq = htonl ( tcp->snd_seq );
726 tcphdr->ack = htonl ( tcp->rcv_ack );
727 tcphdr->hlen = ( ( payload - iobuf->data ) << 2 );
728 tcphdr->flags = flags;
729 tcphdr->win = htons ( tcp->rcv_win >> tcp->rcv_win_scale );
730 tcphdr->csum = tcpip_chksum ( iobuf->data, iob_len ( iobuf ) );
731
732 /* Dump header */
733 DBGC2 ( tcp, "TCP %p TX %d->%d %08x..%08x %08x %4zd",
734 tcp, ntohs ( tcphdr->src ), ntohs ( tcphdr->dest ),
735 ntohl ( tcphdr->seq ), ( ntohl ( tcphdr->seq ) + seq_len ),
736 ntohl ( tcphdr->ack ), len );
737 tcp_dump_flags ( tcp, tcphdr->flags );
738 DBGC2 ( tcp, "\n" );
739
740 /* Transmit packet */
741 if ( ( rc = tcpip_tx ( iobuf, &tcp_protocol, NULL, &tcp->peer, NULL,
742 &tcphdr->csum ) ) != 0 ) {
743 DBGC ( tcp, "TCP %p could not transmit %08x..%08x %08x: %s\n",
744 tcp, tcp->snd_seq, ( tcp->snd_seq + tcp->snd_sent ),
745 tcp->rcv_ack, strerror ( rc ) );
746 return;
747 }
748
749 /* Clear ACK-pending flag */
750 tcp->flags &= ~TCP_ACK_PENDING;
751
752 profile_stop ( &tcp_tx_profiler );
753 }
754
755 /**
756 * Transmit any outstanding data
757 *
758 * @v tcp TCP connection
759 */
760 static void tcp_xmit ( struct tcp_connection *tcp ) {
761
762 /* Transmit without an explicit first SACK */
763 tcp_xmit_sack ( tcp, tcp->rcv_ack );
764 }
765
/** TCP process descriptor
 *
 * Drives tcp_xmit() as the connection's transmit process (scheduled
 * via process_add(); PROC_DESC_ONCE semantics per ipxe/process.h).
 */
static struct process_descriptor tcp_process_desc =
	PROC_DESC_ONCE ( struct tcp_connection, process, tcp_xmit );
769
770 /**
771 * Retransmission timer expired
772 *
773 * @v timer Retransmission timer
774 * @v over Failure indicator
775 */
776 static void tcp_expired ( struct retry_timer *timer, int over ) {
777 struct tcp_connection *tcp =
778 container_of ( timer, struct tcp_connection, timer );
779
780 DBGC ( tcp, "TCP %p timer %s in %s for %08x..%08x %08x\n", tcp,
781 ( over ? "expired" : "fired" ), tcp_state ( tcp->tcp_state ),
782 tcp->snd_seq, ( tcp->snd_seq + tcp->snd_sent ), tcp->rcv_ack );
783
784 assert ( ( tcp->tcp_state == TCP_SYN_SENT ) ||
785 ( tcp->tcp_state == TCP_SYN_RCVD ) ||
786 ( tcp->tcp_state == TCP_ESTABLISHED ) ||
787 ( tcp->tcp_state == TCP_FIN_WAIT_1 ) ||
788 ( tcp->tcp_state == TCP_CLOSE_WAIT ) ||
789 ( tcp->tcp_state == TCP_CLOSING_OR_LAST_ACK ) );
790
791 if ( over ) {
792 /* If we have finally timed out and given up,
793 * terminate the connection
794 */
795 tcp->tcp_state = TCP_CLOSED;
796 tcp_dump_state ( tcp );
797 tcp_close ( tcp, -ETIMEDOUT );
798 } else {
799 /* Otherwise, retransmit the packet */
800 tcp_xmit ( tcp );
801 }
802 }
803
804 /**
805 * Shutdown timer expired
806 *
807 * @v timer Shutdown timer
808 * @v over Failure indicator
809 */
810 static void tcp_wait_expired ( struct retry_timer *timer, int over __unused ) {
811 struct tcp_connection *tcp =
812 container_of ( timer, struct tcp_connection, wait );
813
814 assert ( tcp->tcp_state == TCP_TIME_WAIT );
815
816 DBGC ( tcp, "TCP %p wait complete in %s for %08x..%08x %08x\n", tcp,
817 tcp_state ( tcp->tcp_state ), tcp->snd_seq,
818 ( tcp->snd_seq + tcp->snd_sent ), tcp->rcv_ack );
819
820 tcp->tcp_state = TCP_CLOSED;
821 tcp_dump_state ( tcp );
822 tcp_close ( tcp, 0 );
823 }
824
/**
 * Send RST response to incoming packet
 *
 * @v tcp		TCP connection (used only in debug messages)
 * @v st_dest		Destination socket address for the RST
 * @v in_tcphdr		TCP header of incoming packet
 * @ret rc		Return status code
 *
 * Builds a dataless RST|ACK segment that mirrors the incoming
 * packet's SEQ/ACK values, and transmits it to @c st_dest.
 */
static int tcp_xmit_reset ( struct tcp_connection *tcp,
			    struct sockaddr_tcpip *st_dest,
			    struct tcp_header *in_tcphdr ) {
	struct io_buffer *iobuf;
	struct tcp_header *tcphdr;
	int rc;

	/* Allocate space for dataless TX buffer */
	iobuf = alloc_iob ( TCP_MAX_HEADER_LEN );
	if ( ! iobuf ) {
		DBGC ( tcp, "TCP %p could not allocate iobuf for RST "
		       "%08x..%08x %08x\n", tcp, ntohl ( in_tcphdr->ack ),
		       ntohl ( in_tcphdr->ack ), ntohl ( in_tcphdr->seq ) );
		return -ENOMEM;
	}
	iob_reserve ( iobuf, TCP_MAX_HEADER_LEN );

	/* Construct RST response: swap ports, echo the peer's ACK as
	 * our SEQ and its SEQ as our ACK, and advertise a zero window.
	 */
	tcphdr = iob_push ( iobuf, sizeof ( *tcphdr ) );
	memset ( tcphdr, 0, sizeof ( *tcphdr ) );
	tcphdr->src = in_tcphdr->dest;
	tcphdr->dest = in_tcphdr->src;
	tcphdr->seq = in_tcphdr->ack;
	tcphdr->ack = in_tcphdr->seq;
	tcphdr->hlen = ( ( sizeof ( *tcphdr ) / 4 ) << 4 );
	tcphdr->flags = ( TCP_RST | TCP_ACK );
	tcphdr->win = htons ( 0 );
	tcphdr->csum = tcpip_chksum ( iobuf->data, iob_len ( iobuf ) );

	/* Dump header */
	DBGC2 ( tcp, "TCP %p TX %d->%d %08x..%08x %08x %4d",
		tcp, ntohs ( tcphdr->src ), ntohs ( tcphdr->dest ),
		ntohl ( tcphdr->seq ), ( ntohl ( tcphdr->seq ) ),
		ntohl ( tcphdr->ack ), 0 );
	tcp_dump_flags ( tcp, tcphdr->flags );
	DBGC2 ( tcp, "\n" );

	/* Transmit packet */
	if ( ( rc = tcpip_tx ( iobuf, &tcp_protocol, NULL, st_dest,
			       NULL, &tcphdr->csum ) ) != 0 ) {
		DBGC ( tcp, "TCP %p could not transmit RST %08x..%08x %08x: "
		       "%s\n", tcp, ntohl ( in_tcphdr->ack ),
		       ntohl ( in_tcphdr->ack ), ntohl ( in_tcphdr->seq ),
		       strerror ( rc ) );
		return rc;
	}

	return 0;
}
880
881 /***************************************************************************
882 *
883 * Receive data path
884 *
885 ***************************************************************************
886 */
887
888 /**
889 * Identify TCP connection by local port number
890 *
891 * @v local_port Local port
892 * @ret tcp TCP connection, or NULL
893 */
894 static struct tcp_connection * tcp_demux ( unsigned int local_port ) {
895 struct tcp_connection *tcp;
896
897 list_for_each_entry ( tcp, &tcp_conns, list ) {
898 if ( tcp->local_port == local_port )
899 return tcp;
900 }
901 return NULL;
902 }
903
/**
 * Parse TCP received options
 *
 * @v tcp		TCP connection (may be NULL)
 * @v tcphdr		TCP header
 * @v hlen		TCP header length
 * @v options		Options structure to fill in
 * @ret rc		Return status code
 *
 * Walks the option bytes between the fixed header and @c hlen,
 * recording pointers to recognised options in @c options.  All
 * length fields are validated against the remaining space before
 * being trusted, so malformed options cannot cause reads beyond the
 * header.
 */
static int tcp_rx_opts ( struct tcp_connection *tcp,
			 const struct tcp_header *tcphdr, size_t hlen,
			 struct tcp_options *options ) {
	const void *data = ( ( ( void * ) tcphdr ) + sizeof ( *tcphdr ) );
	const void *end = ( ( ( void * ) tcphdr ) + hlen );
	const struct tcp_option *option;
	unsigned int kind;
	size_t remaining;
	size_t min;

	/* Sanity check */
	assert ( hlen >= sizeof ( *tcphdr ) );

	/* Parse options */
	memset ( options, 0, sizeof ( *options ) );
	while ( ( remaining = ( end - data ) ) ) {

		/* Extract option code */
		option = data;
		kind = option->kind;

		/* Handle single-byte options */
		if ( kind == TCP_OPTION_END )
			break;
		if ( kind == TCP_OPTION_NOP ) {
			data++;
			continue;
		}

		/* Handle multi-byte options: each case sets the
		 * minimum valid length for that option kind.
		 */
		min = sizeof ( *option );
		switch ( kind ) {
		case TCP_OPTION_MSS:
			/* Ignore received MSS */
			break;
		case TCP_OPTION_WS:
			options->wsopt = data;
			min = sizeof ( *options->wsopt );
			break;
		case TCP_OPTION_SACK_PERMITTED:
			options->spopt = data;
			min = sizeof ( *options->spopt );
			break;
		case TCP_OPTION_SACK:
			/* Ignore received SACKs */
			break;
		case TCP_OPTION_TS:
			options->tsopt = data;
			min = sizeof ( *options->tsopt );
			break;
		default:
			DBGC ( tcp, "TCP %p received unknown option %d\n",
			       tcp, kind );
			break;
		}
		/* The remaining-space check precedes any use of the
		 * option's own length field, which guarantees that the
		 * length byte itself lies within the header.
		 */
		if ( remaining < min ) {
			DBGC ( tcp, "TCP %p received truncated option %d\n",
			       tcp, kind );
			return -EINVAL;
		}
		if ( option->length < min ) {
			DBGC ( tcp, "TCP %p received underlength option %d\n",
			       tcp, kind );
			return -EINVAL;
		}
		if ( option->length > remaining ) {
			DBGC ( tcp, "TCP %p received overlength option %d\n",
			       tcp, kind );
			return -EINVAL;
		}
		data += option->length;
	}

	return 0;
}
988
989 /**
990 * Consume received sequence space
991 *
992 * @v tcp TCP connection
993 * @v seq_len Sequence space length to consume
994 */
995 static void tcp_rx_seq ( struct tcp_connection *tcp, uint32_t seq_len ) {
996 unsigned int sack;
997
998 /* Sanity check */
999 assert ( seq_len > 0 );
1000
1001 /* Update acknowledgement number */
1002 tcp->rcv_ack += seq_len;
1003
1004 /* Update window */
1005 if ( tcp->rcv_win > seq_len ) {
1006 tcp->rcv_win -= seq_len;
1007 } else {
1008 tcp->rcv_win = 0;
1009 }
1010
1011 /* Update timestamp */
1012 tcp->ts_recent = tcp->ts_val;
1013
1014 /* Update SACK list */
1015 for ( sack = 0 ; sack < TCP_SACK_MAX ; sack++ ) {
1016 if ( tcp->sack[sack].left == tcp->sack[sack].right )
1017 continue;
1018 if ( tcp_cmp ( tcp->sack[sack].left, tcp->rcv_ack ) < 0 )
1019 tcp->sack[sack].left = tcp->rcv_ack;
1020 if ( tcp_cmp ( tcp->sack[sack].right, tcp->rcv_ack ) < 0 )
1021 tcp->sack[sack].right = tcp->rcv_ack;
1022 }
1023
1024 /* Mark ACK as pending */
1025 tcp->flags |= TCP_ACK_PENDING;
1026 }
1027
/**
 * Handle TCP received SYN
 *
 * @v tcp		TCP connection
 * @v seq		SEQ value (in host-endian order)
 * @v options		TCP options
 * @ret rc		Return status code
 *
 * On the first SYN, synchronises our acknowledgement point with the
 * peer's sequence number and latches the negotiated options
 * (timestamps, SACK, window scaling).  Duplicate SYNs with a
 * non-matching SEQ are ignored.
 */
static int tcp_rx_syn ( struct tcp_connection *tcp, uint32_t seq,
			struct tcp_options *options ) {

	/* Synchronise sequence numbers on first SYN */
	if ( ! ( tcp->tcp_state & TCP_STATE_RCVD ( TCP_SYN ) ) ) {
		tcp->rcv_ack = seq;
		if ( options->tsopt )
			tcp->flags |= TCP_TS_ENABLED;
		if ( options->spopt )
			tcp->flags |= TCP_SACK_ENABLED;
		/* We apply our receive scale only if the peer offered
		 * window scaling, as required for RFC 1323 negotiation.
		 */
		if ( options->wsopt ) {
			tcp->snd_win_scale = options->wsopt->scale;
			tcp->rcv_win_scale = TCP_RX_WINDOW_SCALE;
		}
		DBGC ( tcp, "TCP %p using %stimestamps, %sSACK, TX window "
		       "x%d, RX window x%d\n", tcp,
		       ( ( tcp->flags & TCP_TS_ENABLED ) ? "" : "no " ),
		       ( ( tcp->flags & TCP_SACK_ENABLED ) ? "" : "no " ),
		       ( 1 << tcp->snd_win_scale ),
		       ( 1 << tcp->rcv_win_scale ) );
	}

	/* Ignore duplicate SYN */
	if ( seq != tcp->rcv_ack )
		return 0;

	/* Acknowledge SYN (which consumes one byte of sequence space) */
	tcp_rx_seq ( tcp, 1 );

	/* Mark SYN as received and start sending ACKs with each packet */
	tcp->tcp_state |= ( TCP_STATE_SENT ( TCP_ACK ) |
			    TCP_STATE_RCVD ( TCP_SYN ) );

	return 0;
}
1071
/**
 * Handle TCP received ACK
 *
 * @v tcp		TCP connection
 * @v ack		ACK value (in host-endian order)
 * @v win		WIN value (in host-endian order)
 * @ret rc		Return status code
 *
 * Validates the ACK, updates the send window, trims acknowledged
 * data from the transmit queue, and advances the SYN/FIN portions of
 * the state machine.  A non-zero return code causes the caller to
 * transmit a RST.
 */
static int tcp_rx_ack ( struct tcp_connection *tcp, uint32_t ack,
			uint32_t win ) {
	/* Modulo-2^32 distance from SND.UNA to the ACKed point; an
	 * old duplicate ACK therefore shows up as a very large value.
	 */
	uint32_t ack_len = ( ack - tcp->snd_seq );
	size_t len;
	unsigned int acked_flags;

	/* Check for out-of-range or old duplicate ACKs */
	if ( ack_len > tcp->snd_sent ) {
		DBGC ( tcp, "TCP %p received ACK for %08x..%08x, "
		       "sent only %08x..%08x\n", tcp, tcp->snd_seq,
		       ( tcp->snd_seq + ack_len ), tcp->snd_seq,
		       ( tcp->snd_seq + tcp->snd_sent ) );

		if ( TCP_HAS_BEEN_ESTABLISHED ( tcp->tcp_state ) ) {
			/* Just ignore what might be old duplicate ACKs */
			return 0;
		} else {
			/* Send RST if an out-of-range ACK is received
			 * on a not-yet-established connection, as per
			 * RFC 793.
			 */
			return -EINVAL;
		}
	}

	/* Update window size */
	tcp->snd_win = win;

	/* Ignore ACKs that don't actually acknowledge any new data.
	 * (In particular, do not stop the retransmission timer; this
	 * avoids creating a sorceror's apprentice syndrome when a
	 * duplicate ACK is received and we still have data in our
	 * transmit queue.)
	 */
	if ( ack_len == 0 )
		return 0;

	/* Stop the retransmission timer */
	stop_timer ( &tcp->timer );

	/* Determine acknowledged flags and data length.  A SYN or
	 * FIN that we are currently sending consumes exactly one of
	 * the acknowledged sequence numbers.
	 */
	len = ack_len;
	acked_flags = ( TCP_FLAGS_SENDING ( tcp->tcp_state ) &
			( TCP_SYN | TCP_FIN ) );
	if ( acked_flags ) {
		len--;
		/* The acknowledged SYN/FIN is no longer pending */
		pending_put ( &tcp->pending_flags );
	}

	/* Update SEQ and sent counters */
	tcp->snd_seq = ack;
	tcp->snd_sent = 0;

	/* Remove any acknowledged data from transmit queue */
	tcp_process_tx_queue ( tcp, len, NULL, 1 );

	/* Mark SYN/FIN as acknowledged if applicable. */
	if ( acked_flags )
		tcp->tcp_state |= TCP_STATE_ACKED ( acked_flags );

	/* Start sending FIN if we've had all possible data ACKed */
	if ( list_empty ( &tcp->tx_queue ) &&
	     ( tcp->flags & TCP_XFER_CLOSED ) &&
	     ! ( tcp->tcp_state & TCP_STATE_SENT ( TCP_FIN ) ) ) {
		tcp->tcp_state |= TCP_STATE_SENT ( TCP_FIN );
		/* Sending the FIN becomes a new pending operation */
		pending_get ( &tcp->pending_flags );
	}

	return 0;
}
1150
1151 /**
1152 * Handle TCP received data
1153 *
1154 * @v tcp TCP connection
1155 * @v seq SEQ value (in host-endian order)
1156 * @v iobuf I/O buffer
1157 * @ret rc Return status code
1158 *
1159 * This function takes ownership of the I/O buffer.
1160 */
1161 static int tcp_rx_data ( struct tcp_connection *tcp, uint32_t seq,
1162 struct io_buffer *iobuf ) {
1163 uint32_t already_rcvd;
1164 uint32_t len;
1165 int rc;
1166
1167 /* Ignore duplicate or out-of-order data */
1168 already_rcvd = ( tcp->rcv_ack - seq );
1169 len = iob_len ( iobuf );
1170 if ( already_rcvd >= len ) {
1171 free_iob ( iobuf );
1172 return 0;
1173 }
1174 iob_pull ( iobuf, already_rcvd );
1175 len -= already_rcvd;
1176
1177 /* Acknowledge new data */
1178 tcp_rx_seq ( tcp, len );
1179
1180 /* Deliver data to application */
1181 profile_start ( &tcp_xfer_profiler );
1182 if ( ( rc = xfer_deliver_iob ( &tcp->xfer, iobuf ) ) != 0 ) {
1183 DBGC ( tcp, "TCP %p could not deliver %08x..%08x: %s\n",
1184 tcp, seq, ( seq + len ), strerror ( rc ) );
1185 return rc;
1186 }
1187 profile_stop ( &tcp_xfer_profiler );
1188
1189 return 0;
1190 }
1191
1192 /**
1193 * Handle TCP received FIN
1194 *
1195 * @v tcp TCP connection
1196 * @v seq SEQ value (in host-endian order)
1197 * @ret rc Return status code
1198 */
1199 static int tcp_rx_fin ( struct tcp_connection *tcp, uint32_t seq ) {
1200
1201 /* Ignore duplicate or out-of-order FIN */
1202 if ( seq != tcp->rcv_ack )
1203 return 0;
1204
1205 /* Acknowledge FIN */
1206 tcp_rx_seq ( tcp, 1 );
1207
1208 /* Mark FIN as received */
1209 tcp->tcp_state |= TCP_STATE_RCVD ( TCP_FIN );
1210
1211 /* Close connection */
1212 tcp_close ( tcp, 0 );
1213
1214 return 0;
1215 }
1216
1217 /**
1218 * Handle TCP received RST
1219 *
1220 * @v tcp TCP connection
1221 * @v seq SEQ value (in host-endian order)
1222 * @ret rc Return status code
1223 */
1224 static int tcp_rx_rst ( struct tcp_connection *tcp, uint32_t seq ) {
1225
1226 /* Accept RST only if it falls within the window. If we have
1227 * not yet received a SYN, then we have no window to test
1228 * against, so fall back to checking that our SYN has been
1229 * ACKed.
1230 */
1231 if ( tcp->tcp_state & TCP_STATE_RCVD ( TCP_SYN ) ) {
1232 if ( ! tcp_in_window ( seq, tcp->rcv_ack, tcp->rcv_win ) )
1233 return 0;
1234 } else {
1235 if ( ! ( tcp->tcp_state & TCP_STATE_ACKED ( TCP_SYN ) ) )
1236 return 0;
1237 }
1238
1239 /* Abort connection */
1240 tcp->tcp_state = TCP_CLOSED;
1241 tcp_dump_state ( tcp );
1242 tcp_close ( tcp, -ECONNRESET );
1243
1244 DBGC ( tcp, "TCP %p connection reset by peer\n", tcp );
1245 return -ECONNRESET;
1246 }
1247
/**
 * Enqueue received TCP packet
 *
 * @v tcp		TCP connection
 * @v seq		SEQ value (in host-endian order)
 * @v flags		TCP flags
 * @v iobuf		I/O buffer
 *
 * Inserts the segment into the receive queue, which is kept sorted
 * by starting sequence number.  This function takes ownership of the
 * I/O buffer.
 */
static void tcp_rx_enqueue ( struct tcp_connection *tcp, uint32_t seq,
			     uint8_t flags, struct io_buffer *iobuf ) {
	struct tcp_rx_queued_header *tcpqhdr;
	struct io_buffer *queued;
	size_t len;
	uint32_t seq_len;
	uint32_t nxt;

	/* Calculate remaining flags and sequence length.  Note that
	 * SYN, if present, has already been processed by this point.
	 */
	flags &= TCP_FIN;
	len = iob_len ( iobuf );
	seq_len = ( len + ( flags ? 1 : 0 ) );
	nxt = ( seq + seq_len );

	/* Discard immediately (to save memory) if:
	 *
	 * a) we have not yet received a SYN (and so have no defined
	 *    receive window), or
	 * b) the packet lies entirely outside the receive window, or
	 * c) there is no further content to process.
	 */
	if ( ( ! ( tcp->tcp_state & TCP_STATE_RCVD ( TCP_SYN ) ) ) ||
	     ( tcp_cmp ( seq, tcp->rcv_ack + tcp->rcv_win ) >= 0 ) ||
	     ( tcp_cmp ( nxt, tcp->rcv_ack ) < 0 ) ||
	     ( seq_len == 0 ) ) {
		free_iob ( iobuf );
		return;
	}

	/* Prepend an internal header recording the segment
	 * boundaries and remaining flags for later processing.
	 */
	tcpqhdr = iob_push ( iobuf, sizeof ( *tcpqhdr ) );
	tcpqhdr->seq = seq;
	tcpqhdr->nxt = nxt;
	tcpqhdr->flags = flags;

	/* Add to RX queue, immediately before the first existing
	 * entry with a higher starting sequence number.  If no such
	 * entry exists, the loop leaves "queued" at the list head
	 * and list_add_tail() appends the packet at the tail.
	 */
	list_for_each_entry ( queued, &tcp->rx_queue, list ) {
		tcpqhdr = queued->data;
		if ( tcp_cmp ( seq, tcpqhdr->seq ) < 0 )
			break;
	}
	list_add_tail ( &iobuf->list, &queued->list );
}
1301
/**
 * Process receive queue
 *
 * @v tcp		TCP connection
 *
 * Delivers any contiguous (in-order) queued segments to the
 * application, handling any FIN flags encountered along the way, and
 * stops at the first sequence gap.
 */
static void tcp_process_rx_queue ( struct tcp_connection *tcp ) {
	struct io_buffer *iobuf;
	struct tcp_rx_queued_header *tcpqhdr;
	uint32_t seq;
	unsigned int flags;
	size_t len;

	/* Process all applicable received buffers.  Note that we
	 * cannot use list_for_each_entry() to iterate over the RX
	 * queue, since tcp_discard() may remove packets from the RX
	 * queue while we are processing.
	 */
	while ( ( iobuf = list_first_entry ( &tcp->rx_queue, struct io_buffer,
					     list ) ) ) {

		/* Stop processing when we hit the first gap */
		tcpqhdr = iobuf->data;
		if ( tcp_cmp ( tcpqhdr->seq, tcp->rcv_ack ) > 0 )
			break;

		/* Strip internal header and remove from RX queue */
		list_del ( &iobuf->list );
		seq = tcpqhdr->seq;
		flags = tcpqhdr->flags;
		iob_pull ( iobuf, sizeof ( *tcpqhdr ) );
		len = iob_len ( iobuf );

		/* Handle new data, if any.  tcp_rx_data() takes
		 * ownership of the buffer, hence the length was
		 * recorded beforehand.
		 */
		tcp_rx_data ( tcp, seq, iob_disown ( iobuf ) );
		seq += len;

		/* Handle FIN, if present */
		if ( flags & TCP_FIN ) {
			tcp_rx_fin ( tcp, seq );
			seq++;
		}
	}
}
1345
/**
 * Process received packet
 *
 * @v iobuf		I/O buffer
 * @v netdev		Network device
 * @v st_src		Partially-filled source address
 * @v st_dest		Partially-filled destination address
 * @v pshdr_csum	Pseudo-header checksum
 * @ret rc		Return status code
 *
 * Validates the TCP header and checksum, demultiplexes to a
 * connection, and processes the ACK/SYN/RST flags and any payload.
 * This function takes ownership of the I/O buffer.
 */
static int tcp_rx ( struct io_buffer *iobuf,
		    struct net_device *netdev __unused,
		    struct sockaddr_tcpip *st_src,
		    struct sockaddr_tcpip *st_dest __unused,
		    uint16_t pshdr_csum ) {
	struct tcp_header *tcphdr = iobuf->data;
	struct tcp_connection *tcp;
	struct tcp_options options;
	size_t hlen;
	uint16_t csum;
	uint32_t seq;
	uint32_t ack;
	uint16_t raw_win;
	uint32_t win;
	unsigned int flags;
	size_t len;
	uint32_t seq_len;
	size_t old_xfer_window;
	int rc;

	/* Start profiling */
	profile_start ( &tcp_rx_profiler );

	/* Sanity check packet */
	if ( iob_len ( iobuf ) < sizeof ( *tcphdr ) ) {
		DBG ( "TCP packet too short at %zd bytes (min %zd bytes)\n",
		      iob_len ( iobuf ), sizeof ( *tcphdr ) );
		rc = -EINVAL;
		goto discard;
	}
	/* Header length field counts 32-bit words in the top nibble */
	hlen = ( ( tcphdr->hlen & TCP_MASK_HLEN ) / 16 ) * 4;
	if ( hlen < sizeof ( *tcphdr ) ) {
		DBG ( "TCP header too short at %zd bytes (min %zd bytes)\n",
		      hlen, sizeof ( *tcphdr ) );
		rc = -EINVAL;
		goto discard;
	}
	if ( hlen > iob_len ( iobuf ) ) {
		DBG ( "TCP header too long at %zd bytes (max %zd bytes)\n",
		      hlen, iob_len ( iobuf ) );
		rc = -EINVAL;
		goto discard;
	}
	/* Checksum covers the pseudo-header plus the entire segment;
	 * the stored checksum field makes a correct sum come out zero.
	 */
	csum = tcpip_continue_chksum ( pshdr_csum, iobuf->data,
				       iob_len ( iobuf ) );
	if ( csum != 0 ) {
		DBG ( "TCP checksum incorrect (is %04x including checksum "
		      "field, should be 0000)\n", csum );
		rc = -EINVAL;
		goto discard;
	}

	/* Parse parameters from header and strip header.  Note that
	 * tcp may be NULL if no connection matches the destination
	 * port; this is checked only after the header dump below.
	 */
	tcp = tcp_demux ( ntohs ( tcphdr->dest ) );
	seq = ntohl ( tcphdr->seq );
	ack = ntohl ( tcphdr->ack );
	raw_win = ntohs ( tcphdr->win );
	flags = tcphdr->flags;
	if ( ( rc = tcp_rx_opts ( tcp, tcphdr, hlen, &options ) ) != 0 )
		goto discard;
	if ( tcp && options.tsopt )
		tcp->ts_val = ntohl ( options.tsopt->tsval );
	iob_pull ( iobuf, hlen );
	len = iob_len ( iobuf );
	/* SYN and FIN each occupy one sequence number */
	seq_len = ( len + ( ( flags & TCP_SYN ) ? 1 : 0 ) +
		    ( ( flags & TCP_FIN ) ? 1 : 0 ) );

	/* Dump header */
	DBGC2 ( tcp, "TCP %p RX %d<-%d %08x %08x..%08x %4zd",
		tcp, ntohs ( tcphdr->dest ), ntohs ( tcphdr->src ),
		ntohl ( tcphdr->ack ), ntohl ( tcphdr->seq ),
		( ntohl ( tcphdr->seq ) + seq_len ), len );
	tcp_dump_flags ( tcp, tcphdr->flags );
	DBGC2 ( tcp, "\n" );

	/* If no connection was found, silently drop packet */
	if ( ! tcp ) {
		rc = -ENOTCONN;
		goto discard;
	}

	/* Record old data-transfer window */
	old_xfer_window = tcp_xfer_window ( tcp );

	/* Handle ACK, if present */
	if ( flags & TCP_ACK ) {
		win = ( raw_win << tcp->snd_win_scale );
		if ( ( rc = tcp_rx_ack ( tcp, ack, win ) ) != 0 ) {
			/* tcp_rx_ack() fails only for an out-of-range
			 * ACK on a not-yet-established connection, in
			 * which case we respond with a RST.
			 */
			tcp_xmit_reset ( tcp, st_src, tcphdr );
			goto discard;
		}
	}

	/* Force an ACK if this packet is out of order */
	if ( ( tcp->tcp_state & TCP_STATE_RCVD ( TCP_SYN ) ) &&
	     ( seq != tcp->rcv_ack ) ) {
		tcp->flags |= TCP_ACK_PENDING;
	}

	/* Handle SYN, if present */
	if ( flags & TCP_SYN ) {
		tcp_rx_syn ( tcp, seq, &options );
		seq++;
	}

	/* Handle RST, if present */
	if ( flags & TCP_RST ) {
		if ( ( rc = tcp_rx_rst ( tcp, seq ) ) != 0 )
			goto discard;
	}

	/* Enqueue received data */
	tcp_rx_enqueue ( tcp, seq, flags, iob_disown ( iobuf ) );

	/* Process receive queue */
	tcp_process_rx_queue ( tcp );

	/* Dump out any state change as a result of the received packet */
	tcp_dump_state ( tcp );

	/* Schedule transmission of ACK (and any pending data).  If we
	 * have received any out-of-order packets (i.e. if the receive
	 * queue remains non-empty after processing) then send the ACK
	 * immediately in order to trigger Fast Retransmission.
	 */
	if ( list_empty ( &tcp->rx_queue ) ) {
		process_add ( &tcp->process );
	} else {
		tcp_xmit_sack ( tcp, seq );
	}

	/* If this packet was the last we expect to receive, set up
	 * timer to expire and cause the connection to be freed.
	 */
	if ( TCP_CLOSED_GRACEFULLY ( tcp->tcp_state ) ) {
		stop_timer ( &tcp->wait );
		/* Linger for twice the maximum segment lifetime */
		start_timer_fixed ( &tcp->wait, ( 2 * TCP_MSL ) );
	}

	/* Notify application if window has changed */
	if ( tcp_xfer_window ( tcp ) != old_xfer_window )
		xfer_window_changed ( &tcp->xfer );

	profile_stop ( &tcp_rx_profiler );
	return 0;

 discard:
	/* Free received packet */
	free_iob ( iobuf );
	return rc;
}
1507
/** TCP protocol handler, registered with the TCP/IP stack */
struct tcpip_protocol tcp_protocol __tcpip_protocol = {
	.name = "TCP",
	.rx = tcp_rx,
	.tcpip_proto = IP_TCP,
};
1514
/**
 * Discard some cached TCP data
 *
 * @ret discarded	Number of cached items discarded
 *
 * Called by the cache discarder when memory is low.  Frees at most
 * one queued out-of-order RX packet per connection, choosing the
 * highest-sequenced (furthest out-of-order) packet, since the RX
 * queue is kept sorted by sequence number.
 */
static unsigned int tcp_discard ( void ) {
	struct tcp_connection *tcp;
	struct io_buffer *iobuf;
	unsigned int discarded = 0;

	/* Try to drop one queued RX packet from each connection */
	list_for_each_entry ( tcp, &tcp_conns, list ) {
		/* Reverse iteration with an immediate break visits
		 * only the last (highest-sequence) queued packet, if
		 * any; an empty queue skips the loop body entirely.
		 */
		list_for_each_entry_reverse ( iobuf, &tcp->rx_queue, list ) {

			/* Remove packet from queue */
			list_del ( &iobuf->list );
			free_iob ( iobuf );

			/* Report discard */
			discarded++;
			break;
		}
	}

	return discarded;
}
1541
/** TCP cache discarder, invoked when the system runs short of memory */
struct cache_discarder tcp_discarder __cache_discarder ( CACHE_NORMAL ) = {
	.discard = tcp_discard,
};
1546
1547 /**
1548 * Find first TCP connection that has not yet been closed
1549 *
1550 * @ret tcp First unclosed connection, or NULL
1551 */
1552 static struct tcp_connection * tcp_first_unclosed ( void ) {
1553 struct tcp_connection *tcp;
1554
1555 /* Find first connection which has not yet been closed */
1556 list_for_each_entry ( tcp, &tcp_conns, list ) {
1557 if ( ! ( tcp->flags & TCP_XFER_CLOSED ) )
1558 return tcp;
1559 }
1560 return NULL;
1561 }
1562
1563 /**
1564 * Find first TCP connection that has not yet finished all operations
1565 *
1566 * @ret tcp First unfinished connection, or NULL
1567 */
1568 static struct tcp_connection * tcp_first_unfinished ( void ) {
1569 struct tcp_connection *tcp;
1570
1571 /* Find first connection which has not yet closed gracefully,
1572 * or which still has a pending transmission (e.g. to ACK the
1573 * received FIN).
1574 */
1575 list_for_each_entry ( tcp, &tcp_conns, list ) {
1576 if ( ( ! TCP_CLOSED_GRACEFULLY ( tcp->tcp_state ) ) ||
1577 process_running ( &tcp->process ) ) {
1578 return tcp;
1579 }
1580 }
1581 return NULL;
1582 }
1583
/**
 * Shut down all TCP connections
 *
 * @v booting		Shutting down in order to boot (ignored)
 *
 * Initiates a graceful close of every connection, waits (up to
 * TCP_FINISH_TIMEOUT) for the closes to complete, then forcibly
 * closes anything that remains.
 */
static void tcp_shutdown ( int booting __unused ) {
	struct tcp_connection *tcp;
	unsigned long start;
	unsigned long elapsed;

	/* Initiate a graceful close of all connections, allowing for
	 * the fact that the connection list may change as we do so.
	 */
	while ( ( tcp = tcp_first_unclosed() ) ) {
		DBGC ( tcp, "TCP %p closing for shutdown\n", tcp );
		tcp_close ( tcp, -ECANCELED );
	}

	/* Wait for all connections to finish closing gracefully */
	start = currticks();
	while ( ( tcp = tcp_first_unfinished() ) &&
		( ( elapsed = ( currticks() - start ) ) < TCP_FINISH_TIMEOUT )){
		/* Drive the network stack until finished or timed out */
		step();
	}

	/* Forcibly close any remaining connections.  Re-fetching the
	 * first list entry each iteration is deliberate: tcp_close()
	 * removes the connection from the list.
	 */
	while ( ( tcp = list_first_entry ( &tcp_conns, struct tcp_connection,
					   list ) ) != NULL ) {
		tcp->tcp_state = TCP_CLOSED;
		tcp_dump_state ( tcp );
		tcp_close ( tcp, -ECANCELED );
	}
}
1616
/** TCP shutdown function, registered to run at the STARTUP_LATE stage */
struct startup_fn tcp_startup_fn __startup_fn ( STARTUP_LATE ) = {
	.shutdown = tcp_shutdown,
};
1621
1622 /***************************************************************************
1623 *
1624 * Data transfer interface
1625 *
1626 ***************************************************************************
1627 */
1628
/**
 * Close interface
 *
 * @v tcp		TCP connection
 * @v rc		Reason for close
 *
 * Invoked when the application closes its data transfer interface;
 * begins a graceful shutdown of the connection.
 */
static void tcp_xfer_close ( struct tcp_connection *tcp, int rc ) {

	/* Close data transfer interface */
	tcp_close ( tcp, rc );

	/* Transmit FIN, if possible */
	tcp_xmit ( tcp );
}
1643
/**
 * Deliver datagram as I/O buffer
 *
 * @v tcp		TCP connection
 * @v iobuf		Datagram I/O buffer
 * @v meta		Data transfer metadata
 * @ret rc		Return status code
 *
 * Takes ownership of the I/O buffer, which remains on the transmit
 * queue until acknowledged by the peer (see tcp_rx_ack()).
 */
static int tcp_xfer_deliver ( struct tcp_connection *tcp,
			      struct io_buffer *iobuf,
			      struct xfer_metadata *meta __unused ) {

	/* Enqueue packet */
	list_add_tail ( &iobuf->list, &tcp->tx_queue );

	/* Each enqueued packet is a pending operation */
	pending_get ( &tcp->pending_data );

	/* Transmit data, if possible */
	tcp_xmit ( tcp );

	return 0;
}
1667
/** TCP data transfer interface operations (application-facing API) */
static struct interface_operation tcp_xfer_operations[] = {
	INTF_OP ( xfer_deliver, struct tcp_connection *, tcp_xfer_deliver ),
	INTF_OP ( xfer_window, struct tcp_connection *, tcp_xfer_window ),
	INTF_OP ( intf_close, struct tcp_connection *, tcp_xfer_close ),
};

/** TCP data transfer interface descriptor */
static struct interface_descriptor tcp_xfer_desc =
	INTF_DESC ( struct tcp_connection, xfer, tcp_xfer_operations );
1678
1679 /***************************************************************************
1680 *
1681 * Openers
1682 *
1683 ***************************************************************************
1684 */
1685
/** TCP IPv4 socket opener */
struct socket_opener tcp_ipv4_socket_opener __socket_opener = {
	.semantics = TCP_SOCK_STREAM,
	.family = AF_INET,
	.open = tcp_open,
};

/** TCP IPv6 socket opener */
struct socket_opener tcp_ipv6_socket_opener __socket_opener = {
	.semantics = TCP_SOCK_STREAM,
	.family = AF_INET6,
	.open = tcp_open,
};

/** Linkage hack: a named symbol for TCP_SOCK_STREAM.
 *
 * NOTE(review): appears intended to let other objects reference TCP
 * stream support and thereby force this file to be linked in —
 * confirm against users of tcp_sock_stream.
 */
int tcp_sock_stream = TCP_SOCK_STREAM;
1702
1703 /**
1704 * Open TCP URI
1705 *
1706 * @v xfer Data transfer interface
1707 * @v uri URI
1708 * @ret rc Return status code
1709 */
1710 static int tcp_open_uri ( struct interface *xfer, struct uri *uri ) {
1711 struct sockaddr_tcpip peer;
1712
1713 /* Sanity check */
1714 if ( ! uri->host )
1715 return -EINVAL;
1716
1717 memset ( &peer, 0, sizeof ( peer ) );
1718 peer.st_port = htons ( uri_port ( uri, 0 ) );
1719 return xfer_open_named_socket ( xfer, SOCK_STREAM,
1720 ( struct sockaddr * ) &peer,
1721 uri->host, NULL );
1722 }
1723
/** TCP URI opener (handles "tcp://host:port"-style URIs) */
struct uri_opener tcp_uri_opener __uri_opener = {
	.scheme = "tcp",
	.open = tcp_open_uri,
};
1729