[ipxe.git] / src / net / tcp.c
1 #include <string.h>
2 #include <stdlib.h>
3 #include <stdio.h>
4 #include <assert.h>
5 #include <errno.h>
6 #include <byteswap.h>
7 #include <ipxe/timer.h>
8 #include <ipxe/iobuf.h>
9 #include <ipxe/malloc.h>
10 #include <ipxe/init.h>
11 #include <ipxe/retry.h>
12 #include <ipxe/refcnt.h>
13 #include <ipxe/pending.h>
14 #include <ipxe/xfer.h>
15 #include <ipxe/open.h>
16 #include <ipxe/uri.h>
17 #include <ipxe/netdevice.h>
18 #include <ipxe/profile.h>
19 #include <ipxe/process.h>
20 #include <ipxe/tcpip.h>
21 #include <ipxe/tcp.h>
22
23 /** @file
24 *
25 * TCP protocol
26 *
27 */
28
29 FILE_LICENCE ( GPL2_OR_LATER );
30
31 /** A TCP connection */
32 struct tcp_connection {
33 /** Reference counter */
34 struct refcnt refcnt;
35 /** List of TCP connections */
36 struct list_head list;
37
38 /** Flags */
39 unsigned int flags;
40
41 /** Data transfer interface */
42 struct interface xfer;
43
44 /** Remote socket address */
45 struct sockaddr_tcpip peer;
46 /** Local port */
47 unsigned int local_port;
48 /** Maximum segment size */
49 size_t mss;
50
51 /** Current TCP state */
52 unsigned int tcp_state;
53 /** Previous TCP state
54 *
55 * Maintained only for debug messages
56 */
57 unsigned int prev_tcp_state;
58 /** Current sequence number
59 *
60 * Equivalent to SND.UNA in RFC 793 terminology.
61 */
62 uint32_t snd_seq;
63 /** Unacknowledged sequence count
64 *
65 * Equivalent to (SND.NXT-SND.UNA) in RFC 793 terminology.
66 */
67 uint32_t snd_sent;
68 /** Send window
69 *
70 * Equivalent to SND.WND in RFC 793 terminology
71 */
72 uint32_t snd_win;
73 /** Current acknowledgement number
74 *
75 * Equivalent to RCV.NXT in RFC 793 terminology.
76 */
77 uint32_t rcv_ack;
78 /** Receive window
79 *
80 * Equivalent to RCV.WND in RFC 793 terminology.
81 */
82 uint32_t rcv_win;
83 /** Received timestamp value
84 *
85 * Updated when a packet is received; copied to ts_recent when
86 * the window is advanced.
87 */
88 uint32_t ts_val;
89 /** Most recent received timestamp that advanced the window
90 *
91 * Equivalent to TS.Recent in RFC 1323 terminology.
92 */
93 uint32_t ts_recent;
94 /** Send window scale
95 *
96 * Equivalent to Snd.Wind.Scale in RFC 1323 terminology
97 */
98 uint8_t snd_win_scale;
99 /** Receive window scale
100 *
101 * Equivalent to Rcv.Wind.Scale in RFC 1323 terminology
102 */
103 uint8_t rcv_win_scale;
104 /** Maximum receive window */
105 uint32_t max_rcv_win;
106
107 /** Transmit queue */
108 struct list_head tx_queue;
109 /** Receive queue */
110 struct list_head rx_queue;
111 /** Transmission process */
112 struct process process;
113 /** Retransmission timer */
114 struct retry_timer timer;
115 /** Shutdown (TIME_WAIT) timer */
116 struct retry_timer wait;
117
118 /** Pending operations for SYN and FIN */
119 struct pending_operation pending_flags;
120 /** Pending operations for transmit queue */
121 struct pending_operation pending_data;
122 };
123
124 /** TCP flags */
125 enum tcp_flags {
126 /** TCP data transfer interface has been closed */
127 TCP_XFER_CLOSED = 0x0001,
128 /** TCP timestamps are enabled */
129 TCP_TS_ENABLED = 0x0002,
130 /** TCP acknowledgement is pending */
131 TCP_ACK_PENDING = 0x0004,
132 };
133
134 /** TCP internal header
135 *
136 * This is the header that replaces the TCP header for packets
137 * enqueued on the receive queue.
138 */
139 struct tcp_rx_queued_header {
140 /** SEQ value, in host-endian order
141 *
142 * This represents the SEQ value at the time the packet is
143 * enqueued, and so excludes the SYN, if present.
144 */
145 uint32_t seq;
146 /** Flags
147 *
148 * Only FIN is valid within this flags byte; all other flags
149 * have already been processed by the time the packet is
150 * enqueued.
151 */
152 uint8_t flags;
153 /** Reserved */
154 uint8_t reserved[3];
155 };
156
157 /**
158 * List of registered TCP connections
159 */
160 static LIST_HEAD ( tcp_conns );
161
162 /** Transmit profiler */
163 static struct profiler tcp_tx_profiler __profiler = { .name = "tcp.tx" };
164
165 /** Receive profiler */
166 static struct profiler tcp_rx_profiler __profiler = { .name = "tcp.rx" };
167
168 /** Data transfer profiler */
169 static struct profiler tcp_xfer_profiler __profiler = { .name = "tcp.xfer" };
170
171 /* Forward declarations */
172 static struct process_descriptor tcp_process_desc;
173 static struct interface_descriptor tcp_xfer_desc;
174 static void tcp_expired ( struct retry_timer *timer, int over );
175 static void tcp_wait_expired ( struct retry_timer *timer, int over );
176 static struct tcp_connection * tcp_demux ( unsigned int local_port );
177 static int tcp_rx_ack ( struct tcp_connection *tcp, uint32_t ack,
178 uint32_t win );
179
180 /**
181 * Name TCP state
182 *
183 * @v state TCP state
184 * @ret name Name of TCP state
185 */
186 static inline __attribute__ (( always_inline )) const char *
187 tcp_state ( int state ) {
188 switch ( state ) {
189 case TCP_CLOSED: return "CLOSED";
190 case TCP_LISTEN: return "LISTEN";
191 case TCP_SYN_SENT: return "SYN_SENT";
192 case TCP_SYN_RCVD: return "SYN_RCVD";
193 case TCP_ESTABLISHED: return "ESTABLISHED";
194 case TCP_FIN_WAIT_1: return "FIN_WAIT_1";
195 case TCP_FIN_WAIT_2: return "FIN_WAIT_2";
196 case TCP_CLOSING_OR_LAST_ACK: return "CLOSING/LAST_ACK";
197 case TCP_TIME_WAIT: return "TIME_WAIT";
198 case TCP_CLOSE_WAIT: return "CLOSE_WAIT";
199 default: return "INVALID";
200 }
201 }
202
203 /**
204 * Dump TCP state transition
205 *
206 * @v tcp TCP connection
207 */
208 static inline __attribute__ (( always_inline )) void
209 tcp_dump_state ( struct tcp_connection *tcp ) {
210
211 if ( tcp->tcp_state != tcp->prev_tcp_state ) {
212 DBGC ( tcp, "TCP %p transitioned from %s to %s\n", tcp,
213 tcp_state ( tcp->prev_tcp_state ),
214 tcp_state ( tcp->tcp_state ) );
215 }
216 tcp->prev_tcp_state = tcp->tcp_state;
217 }
218
219 /**
220 * Dump TCP flags
221 *
222  * @v tcp		TCP connection
 * @v flags		TCP flags
223 */
224 static inline __attribute__ (( always_inline )) void
225 tcp_dump_flags ( struct tcp_connection *tcp, unsigned int flags ) {
226 if ( flags & TCP_RST )
227 DBGC2 ( tcp, " RST" );
228 if ( flags & TCP_SYN )
229 DBGC2 ( tcp, " SYN" );
230 if ( flags & TCP_PSH )
231 DBGC2 ( tcp, " PSH" );
232 if ( flags & TCP_FIN )
233 DBGC2 ( tcp, " FIN" );
234 if ( flags & TCP_ACK )
235 DBGC2 ( tcp, " ACK" );
236 }
237
238 /***************************************************************************
239 *
240 * Open and close
241 *
242 ***************************************************************************
243 */
244
245 /**
246 * Check if local TCP port is available
247 *
248 * @v port Local port number
249 * @ret port Local port number, or negative error
250 */
251 static int tcp_port_available ( int port ) {
252
253 return ( tcp_demux ( port ) ? -EADDRINUSE : port );
254 }
255
256 /**
257 * Open a TCP connection
258 *
259 * @v xfer Data transfer interface
260 * @v peer Peer socket address
261 * @v local Local socket address, or NULL
262 * @ret rc Return status code
263 */
264 static int tcp_open ( struct interface *xfer, struct sockaddr *peer,
265 struct sockaddr *local ) {
266 struct sockaddr_tcpip *st_peer = ( struct sockaddr_tcpip * ) peer;
267 struct sockaddr_tcpip *st_local = ( struct sockaddr_tcpip * ) local;
268 struct tcp_connection *tcp;
269 size_t mtu;
270 int port;
271 int rc;
272
273 /* Allocate and initialise structure */
274 tcp = zalloc ( sizeof ( *tcp ) );
275 if ( ! tcp )
276 return -ENOMEM;
277 DBGC ( tcp, "TCP %p allocated\n", tcp );
278 ref_init ( &tcp->refcnt, NULL );
279 intf_init ( &tcp->xfer, &tcp_xfer_desc, &tcp->refcnt );
280 process_init_stopped ( &tcp->process, &tcp_process_desc, &tcp->refcnt );
281 timer_init ( &tcp->timer, tcp_expired, &tcp->refcnt );
282 timer_init ( &tcp->wait, tcp_wait_expired, &tcp->refcnt );
283 tcp->prev_tcp_state = TCP_CLOSED;
284 tcp->tcp_state = TCP_STATE_SENT ( TCP_SYN );
285 tcp_dump_state ( tcp );
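/* Choose a random initial sequence number for the connection */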
286 tcp->snd_seq = random();
287 tcp->max_rcv_win = TCP_MAX_WINDOW_SIZE;
288 INIT_LIST_HEAD ( &tcp->tx_queue );
289 INIT_LIST_HEAD ( &tcp->rx_queue );
290 memcpy ( &tcp->peer, st_peer, sizeof ( tcp->peer ) );
291
292 /* Calculate MSS */
293 mtu = tcpip_mtu ( &tcp->peer );
294 if ( ! mtu ) {
295 DBGC ( tcp, "TCP %p has no route to %s\n",
296 tcp, sock_ntoa ( peer ) );
297 rc = -ENETUNREACH;
298 goto err;
299 }
300 tcp->mss = ( mtu - sizeof ( struct tcp_header ) );
301
302 /* Bind to local port */
303 port = tcpip_bind ( st_local, tcp_port_available );
304 if ( port < 0 ) {
305 rc = port;
306 DBGC ( tcp, "TCP %p could not bind: %s\n",
307 tcp, strerror ( rc ) );
308 goto err;
309 }
310 tcp->local_port = port;
311 DBGC ( tcp, "TCP %p bound to port %d\n", tcp, tcp->local_port );
312
313 /* Start timer to initiate SYN */
314 start_timer_nodelay ( &tcp->timer );
315
316 /* Add a pending operation for the SYN */
317 pending_get ( &tcp->pending_flags );
318
319 /* Attach parent interface, transfer reference to connection
320 * list and return
321 */
322 intf_plug_plug ( &tcp->xfer, xfer );
323 list_add ( &tcp->list, &tcp_conns );
324 return 0;
325
326 err:
327 ref_put ( &tcp->refcnt );
328 return rc;
329 }
330
331 /**
332 * Close TCP connection
333 *
334 * @v tcp TCP connection
335 * @v rc Reason for close
336 *
337 * Closes the data transfer interface. If the TCP state machine is in
338 * a suitable state, the connection will be deleted.
339 */
340 static void tcp_close ( struct tcp_connection *tcp, int rc ) {
341 struct io_buffer *iobuf;
342 struct io_buffer *tmp;
343
344 /* Close data transfer interface */
345 intf_shutdown ( &tcp->xfer, rc );
346 tcp->flags |= TCP_XFER_CLOSED;
347
348 /* If we are in CLOSED, or have otherwise not yet received a
349 * SYN (i.e. we are in LISTEN or SYN_SENT), just delete the
350 * connection.
351 */
352 if ( ! ( tcp->tcp_state & TCP_STATE_RCVD ( TCP_SYN ) ) ) {
353
354 /* Transition to CLOSED for the sake of debugging messages */
355 tcp->tcp_state = TCP_CLOSED;
356 tcp_dump_state ( tcp );
357
358 /* Free any unprocessed I/O buffers */
359 list_for_each_entry_safe ( iobuf, tmp, &tcp->rx_queue, list ) {
360 list_del ( &iobuf->list );
361 free_iob ( iobuf );
362 }
363
364 /* Free any unsent I/O buffers */
365 list_for_each_entry_safe ( iobuf, tmp, &tcp->tx_queue, list ) {
366 list_del ( &iobuf->list );
367 free_iob ( iobuf );
368 pending_put ( &tcp->pending_data );
369 }
370 assert ( ! is_pending ( &tcp->pending_data ) );
371
372 /* Remove pending operations for SYN and FIN, if applicable */
373 pending_put ( &tcp->pending_flags );
374 pending_put ( &tcp->pending_flags );
375
376 /* Remove from list and drop reference */
377 process_del ( &tcp->process );
378 stop_timer ( &tcp->timer );
379 stop_timer ( &tcp->wait );
380 list_del ( &tcp->list );
381 ref_put ( &tcp->refcnt );
382 DBGC ( tcp, "TCP %p connection deleted\n", tcp );
383 return;
384 }
385
386 /* If we have not had our SYN acknowledged (i.e. we are in
387 * SYN_RCVD), pretend that it has been acknowledged so that we
388 * can send a FIN without breaking things.
389 */
390 if ( ! ( tcp->tcp_state & TCP_STATE_ACKED ( TCP_SYN ) ) )
391 tcp_rx_ack ( tcp, ( tcp->snd_seq + 1 ), 0 );
392
393 /* If we have no data remaining to send, start sending FIN */
394 if ( list_empty ( &tcp->tx_queue ) &&
395 ! ( tcp->tcp_state & TCP_STATE_SENT ( TCP_FIN ) ) ) {
396
397 tcp->tcp_state |= TCP_STATE_SENT ( TCP_FIN );
398 tcp_dump_state ( tcp );
399
400 /* Add a pending operation for the FIN */
401 pending_get ( &tcp->pending_flags );
402 }
403 }
404
405 /***************************************************************************
406 *
407 * Transmit data path
408 *
409 ***************************************************************************
410 */
411
412 /**
413 * Calculate transmission window
414 *
415 * @v tcp TCP connection
416 * @ret len Maximum length that can be sent in a single packet
417 */
418 static size_t tcp_xmit_win ( struct tcp_connection *tcp ) {
419 size_t len;
420
421 /* Not ready if we're not in a suitable connection state */
422 if ( ! TCP_CAN_SEND_DATA ( tcp->tcp_state ) )
423 return 0;
424
425 /* Length is the minimum of the receiver's window and the path MTU */
426 len = tcp->snd_win;
427 if ( len > TCP_PATH_MTU )
428 len = TCP_PATH_MTU;
429
430 return len;
431 }
432
433 /**
434 * Check data-transfer flow control window
435 *
436 * @v tcp TCP connection
437 * @ret len Length of window
438 */
439 static size_t tcp_xfer_window ( struct tcp_connection *tcp ) {
440
441 /* Not ready if data queue is non-empty. This imposes a limit
442 * of only one unACKed packet in the TX queue at any time; we
443 * do this to conserve memory usage.
444 */
445 if ( ! list_empty ( &tcp->tx_queue ) )
446 return 0;
447
448 /* Return TCP window length */
449 return tcp_xmit_win ( tcp );
450 }
451
452 /**
453 * Process TCP transmit queue
454 *
455 * @v tcp TCP connection
456 * @v max_len Maximum length to process
457 * @v dest I/O buffer to fill with data, or NULL
458 * @v remove Remove data from queue
459 * @ret len Length of data processed
460 *
461 * This processes at most @c max_len bytes from the TCP connection's
462 * transmit queue. Data will be copied into the @c dest I/O buffer
463 * (if provided) and, if @c remove is true, removed from the transmit
464 * queue.
465 */
466 static size_t tcp_process_tx_queue ( struct tcp_connection *tcp, size_t max_len,
467 struct io_buffer *dest, int remove ) {
468 struct io_buffer *iobuf;
469 struct io_buffer *tmp;
470 size_t frag_len;
471 size_t len = 0;
472
473 list_for_each_entry_safe ( iobuf, tmp, &tcp->tx_queue, list ) {
474 frag_len = iob_len ( iobuf );
475 if ( frag_len > max_len )
476 frag_len = max_len;
477 if ( dest ) {
478 memcpy ( iob_put ( dest, frag_len ), iobuf->data,
479 frag_len );
480 }
481 if ( remove ) {
482 iob_pull ( iobuf, frag_len );
483 if ( ! iob_len ( iobuf ) ) {
484 list_del ( &iobuf->list );
485 free_iob ( iobuf );
486 pending_put ( &tcp->pending_data );
487 }
488 }
489 len += frag_len;
490 max_len -= frag_len;
491 }
492 return len;
493 }
494
495 /**
496 * Transmit any outstanding data
497 *
498 * @v tcp TCP connection
499 *
500 * Transmits any outstanding data on the connection.
501 *
502 * Note that even if a transmission error occurs, the retransmission
503 * timer will have been started if necessary, and so the stack will
504 * eventually attempt to retransmit the failed packet.
505 */
506 static void tcp_xmit ( struct tcp_connection *tcp ) {
507 struct io_buffer *iobuf;
508 struct tcp_header *tcphdr;
509 struct tcp_mss_option *mssopt;
510 struct tcp_window_scale_padded_option *wsopt;
511 struct tcp_timestamp_padded_option *tsopt;
512 void *payload;
513 unsigned int flags;
514 size_t len = 0;
515 uint32_t seq_len;
516 uint32_t app_win;
517 uint32_t max_rcv_win;
518 uint32_t max_representable_win;
519 int rc;
520
521 /* Start profiling */
522 profile_start ( &tcp_tx_profiler );
523
524 /* If retransmission timer is already running, do nothing */
525 if ( timer_running ( &tcp->timer ) )
526 return;
527
528 /* Calculate both the actual (payload) and sequence space
529 * lengths that we wish to transmit.
530 */
531 if ( TCP_CAN_SEND_DATA ( tcp->tcp_state ) ) {
532 len = tcp_process_tx_queue ( tcp, tcp_xmit_win ( tcp ),
533 NULL, 0 );
534 }
535 seq_len = len;
536 flags = TCP_FLAGS_SENDING ( tcp->tcp_state );
537 if ( flags & ( TCP_SYN | TCP_FIN ) ) {
538 /* SYN or FIN consume one byte, and we can never send both */
539 assert ( ! ( ( flags & TCP_SYN ) && ( flags & TCP_FIN ) ) );
540 seq_len++;
541 }
542 tcp->snd_sent = seq_len;
543
544 /* If we have nothing to transmit, stop now */
545 if ( ( seq_len == 0 ) && ! ( tcp->flags & TCP_ACK_PENDING ) )
546 return;
547
548 /* If we are transmitting anything that requires
549 * acknowledgement (i.e. consumes sequence space), start the
550 * retransmission timer. Do this before attempting to
551 * allocate the I/O buffer, in case allocation itself fails.
552 */
553 if ( seq_len )
554 start_timer ( &tcp->timer );
555
556 /* Allocate I/O buffer */
557 iobuf = alloc_iob ( len + TCP_MAX_HEADER_LEN );
558 if ( ! iobuf ) {
559 DBGC ( tcp, "TCP %p could not allocate iobuf for %08x..%08x "
560 "%08x\n", tcp, tcp->snd_seq, ( tcp->snd_seq + seq_len ),
561 tcp->rcv_ack );
562 return;
563 }
564 iob_reserve ( iobuf, TCP_MAX_HEADER_LEN );
565
566 /* Fill data payload from transmit queue */
567 tcp_process_tx_queue ( tcp, len, iobuf, 0 );
568
569 /* Expand receive window if possible */
570 max_rcv_win = tcp->max_rcv_win;
571 app_win = xfer_window ( &tcp->xfer );
572 if ( max_rcv_win > app_win )
573 max_rcv_win = app_win;
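/* The TCP header's window field is 16 bits wide, so the largest
 * window we can advertise is 0xffff scaled up by our receive
 * window scale factor.
 */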
574 max_representable_win = ( 0xffff << tcp->rcv_win_scale );
575 if ( max_rcv_win > max_representable_win )
576 max_rcv_win = max_representable_win;
577 max_rcv_win &= ~0x03; /* Keep everything dword-aligned */
578 if ( tcp->rcv_win < max_rcv_win )
579 tcp->rcv_win = max_rcv_win;
580
581 /* Fill up the TCP header */
582 payload = iobuf->data;
583 if ( flags & TCP_SYN ) {
584 mssopt = iob_push ( iobuf, sizeof ( *mssopt ) );
585 mssopt->kind = TCP_OPTION_MSS;
586 mssopt->length = sizeof ( *mssopt );
587 mssopt->mss = htons ( tcp->mss );
588 wsopt = iob_push ( iobuf, sizeof ( *wsopt ) );
589 wsopt->nop = TCP_OPTION_NOP;
590 wsopt->wsopt.kind = TCP_OPTION_WS;
591 wsopt->wsopt.length = sizeof ( wsopt->wsopt );
592 wsopt->wsopt.scale = TCP_RX_WINDOW_SCALE;
593 }
594 if ( ( flags & TCP_SYN ) || ( tcp->flags & TCP_TS_ENABLED ) ) {
595 tsopt = iob_push ( iobuf, sizeof ( *tsopt ) );
596 memset ( tsopt->nop, TCP_OPTION_NOP, sizeof ( tsopt->nop ) );
597 tsopt->tsopt.kind = TCP_OPTION_TS;
598 tsopt->tsopt.length = sizeof ( tsopt->tsopt );
599 tsopt->tsopt.tsval = htonl ( currticks() );
600 tsopt->tsopt.tsecr = htonl ( tcp->ts_recent );
601 }
602 if ( len != 0 )
603 flags |= TCP_PSH;
604 tcphdr = iob_push ( iobuf, sizeof ( *tcphdr ) );
605 memset ( tcphdr, 0, sizeof ( *tcphdr ) );
606 tcphdr->src = htons ( tcp->local_port );
607 tcphdr->dest = tcp->peer.st_port;
608 tcphdr->seq = htonl ( tcp->snd_seq );
609 tcphdr->ack = htonl ( tcp->rcv_ack );
610 tcphdr->hlen = ( ( payload - iobuf->data ) << 2 );
611 tcphdr->flags = flags;
612 tcphdr->win = htons ( tcp->rcv_win >> tcp->rcv_win_scale );
613 tcphdr->csum = tcpip_chksum ( iobuf->data, iob_len ( iobuf ) );
614
615 /* Dump header */
616 DBGC2 ( tcp, "TCP %p TX %d->%d %08x..%08x %08x %4zd",
617 tcp, ntohs ( tcphdr->src ), ntohs ( tcphdr->dest ),
618 ntohl ( tcphdr->seq ), ( ntohl ( tcphdr->seq ) + seq_len ),
619 ntohl ( tcphdr->ack ), len );
620 tcp_dump_flags ( tcp, tcphdr->flags );
621 DBGC2 ( tcp, "\n" );
622
623 /* Transmit packet */
624 if ( ( rc = tcpip_tx ( iobuf, &tcp_protocol, NULL, &tcp->peer, NULL,
625 &tcphdr->csum ) ) != 0 ) {
626 DBGC ( tcp, "TCP %p could not transmit %08x..%08x %08x: %s\n",
627 tcp, tcp->snd_seq, ( tcp->snd_seq + tcp->snd_sent ),
628 tcp->rcv_ack, strerror ( rc ) );
629 return;
630 }
631
632 /* Clear ACK-pending flag */
633 tcp->flags &= ~TCP_ACK_PENDING;
634
635 profile_stop ( &tcp_tx_profiler );
636 }
637
638 /** TCP process descriptor */
639 static struct process_descriptor tcp_process_desc =
640 PROC_DESC_ONCE ( struct tcp_connection, process, tcp_xmit );
641
642 /**
643 * Retransmission timer expired
644 *
645 * @v timer Retransmission timer
646 * @v over Failure indicator
647 */
648 static void tcp_expired ( struct retry_timer *timer, int over ) {
649 struct tcp_connection *tcp =
650 container_of ( timer, struct tcp_connection, timer );
651
652 DBGC ( tcp, "TCP %p timer %s in %s for %08x..%08x %08x\n", tcp,
653 ( over ? "expired" : "fired" ), tcp_state ( tcp->tcp_state ),
654 tcp->snd_seq, ( tcp->snd_seq + tcp->snd_sent ), tcp->rcv_ack );
655
656 assert ( ( tcp->tcp_state == TCP_SYN_SENT ) ||
657 ( tcp->tcp_state == TCP_SYN_RCVD ) ||
658 ( tcp->tcp_state == TCP_ESTABLISHED ) ||
659 ( tcp->tcp_state == TCP_FIN_WAIT_1 ) ||
660 ( tcp->tcp_state == TCP_CLOSE_WAIT ) ||
661 ( tcp->tcp_state == TCP_CLOSING_OR_LAST_ACK ) );
662
663 if ( over ) {
664 /* If we have finally timed out and given up,
665 * terminate the connection
666 */
667 tcp->tcp_state = TCP_CLOSED;
668 tcp_dump_state ( tcp );
669 tcp_close ( tcp, -ETIMEDOUT );
670 } else {
671 /* Otherwise, retransmit the packet */
672 tcp_xmit ( tcp );
673 }
674 }
675
676 /**
677 * Shutdown timer expired
678 *
679 * @v timer Shutdown timer
680 * @v over Failure indicator
681 */
682 static void tcp_wait_expired ( struct retry_timer *timer, int over __unused ) {
683 struct tcp_connection *tcp =
684 container_of ( timer, struct tcp_connection, wait );
685
686 assert ( tcp->tcp_state == TCP_TIME_WAIT );
687
688 DBGC ( tcp, "TCP %p wait complete in %s for %08x..%08x %08x\n", tcp,
689 tcp_state ( tcp->tcp_state ), tcp->snd_seq,
690 ( tcp->snd_seq + tcp->snd_sent ), tcp->rcv_ack );
691
692 tcp->tcp_state = TCP_CLOSED;
693 tcp_dump_state ( tcp );
694 tcp_close ( tcp, 0 );
695 }
696
697 /**
698 * Send RST response to incoming packet
699 *
700  * @v tcp		TCP connection
 * @v st_dest		Destination address
 * @v in_tcphdr		TCP header of incoming packet
701 * @ret rc Return status code
702 */
703 static int tcp_xmit_reset ( struct tcp_connection *tcp,
704 struct sockaddr_tcpip *st_dest,
705 struct tcp_header *in_tcphdr ) {
706 struct io_buffer *iobuf;
707 struct tcp_header *tcphdr;
708 int rc;
709
710 /* Allocate space for dataless TX buffer */
711 iobuf = alloc_iob ( TCP_MAX_HEADER_LEN );
712 if ( ! iobuf ) {
713 DBGC ( tcp, "TCP %p could not allocate iobuf for RST "
714 "%08x..%08x %08x\n", tcp, ntohl ( in_tcphdr->ack ),
715 ntohl ( in_tcphdr->ack ), ntohl ( in_tcphdr->seq ) );
716 return -ENOMEM;
717 }
718 iob_reserve ( iobuf, TCP_MAX_HEADER_LEN );
719
720 /* Construct RST response */
721 tcphdr = iob_push ( iobuf, sizeof ( *tcphdr ) );
722 memset ( tcphdr, 0, sizeof ( *tcphdr ) );
723 tcphdr->src = in_tcphdr->dest;
724 tcphdr->dest = in_tcphdr->src;
725 tcphdr->seq = in_tcphdr->ack;
726 tcphdr->ack = in_tcphdr->seq;
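/* Header length is expressed in 32-bit words, held in the top
 * four bits of the hlen field.
 */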
727 tcphdr->hlen = ( ( sizeof ( *tcphdr ) / 4 ) << 4 );
728 tcphdr->flags = ( TCP_RST | TCP_ACK );
729 tcphdr->win = htons ( 0 );
730 tcphdr->csum = tcpip_chksum ( iobuf->data, iob_len ( iobuf ) );
731
732 /* Dump header */
733 DBGC2 ( tcp, "TCP %p TX %d->%d %08x..%08x %08x %4d",
734 tcp, ntohs ( tcphdr->src ), ntohs ( tcphdr->dest ),
735 ntohl ( tcphdr->seq ), ( ntohl ( tcphdr->seq ) ),
736 ntohl ( tcphdr->ack ), 0 );
737 tcp_dump_flags ( tcp, tcphdr->flags );
738 DBGC2 ( tcp, "\n" );
739
740 /* Transmit packet */
741 if ( ( rc = tcpip_tx ( iobuf, &tcp_protocol, NULL, st_dest,
742 NULL, &tcphdr->csum ) ) != 0 ) {
743 DBGC ( tcp, "TCP %p could not transmit RST %08x..%08x %08x: "
744 "%s\n", tcp, ntohl ( in_tcphdr->ack ),
745 ntohl ( in_tcphdr->ack ), ntohl ( in_tcphdr->seq ),
746 strerror ( rc ) );
747 return rc;
748 }
749
750 return 0;
751 }
752
753 /***************************************************************************
754 *
755 * Receive data path
756 *
757 ***************************************************************************
758 */
759
760 /**
761 * Identify TCP connection by local port number
762 *
763 * @v local_port Local port
764 * @ret tcp TCP connection, or NULL
765 */
766 static struct tcp_connection * tcp_demux ( unsigned int local_port ) {
767 struct tcp_connection *tcp;
768
769 list_for_each_entry ( tcp, &tcp_conns, list ) {
770 if ( tcp->local_port == local_port )
771 return tcp;
772 }
773 return NULL;
774 }
775
776 /**
777 * Parse TCP received options
778 *
779 * @v tcp TCP connection
780 * @v data Raw options data
781 * @v len Raw options length
782 * @v options Options structure to fill in
783 */
784 static void tcp_rx_opts ( struct tcp_connection *tcp, const void *data,
785 size_t len, struct tcp_options *options ) {
786 const void *end = ( data + len );
787 const struct tcp_option *option;
788 unsigned int kind;
789
790 memset ( options, 0, sizeof ( *options ) );
791 while ( data < end ) {
792 option = data;
793 kind = option->kind;
794 if ( kind == TCP_OPTION_END )
795 return;
796 if ( kind == TCP_OPTION_NOP ) {
797 data++;
798 continue;
799 }
800 switch ( kind ) {
801 case TCP_OPTION_MSS:
802 options->mssopt = data;
803 break;
804 case TCP_OPTION_WS:
805 options->wsopt = data;
806 break;
807 case TCP_OPTION_TS:
808 options->tsopt = data;
809 break;
810 default:
811 DBGC ( tcp, "TCP %p received unknown option %d\n",
812 tcp, kind );
813 break;
814 }
815 data += option->length;
816 }
817 }
818
819 /**
820 * Consume received sequence space
821 *
822 * @v tcp TCP connection
823 * @v seq_len Sequence space length to consume
824 */
825 static void tcp_rx_seq ( struct tcp_connection *tcp, uint32_t seq_len ) {
826
827 /* Sanity check */
828 assert ( seq_len > 0 );
829
830 /* Update acknowledgement number */
831 tcp->rcv_ack += seq_len;
832
833 /* Update window */
834 if ( tcp->rcv_win > seq_len ) {
835 tcp->rcv_win -= seq_len;
836 } else {
837 tcp->rcv_win = 0;
838 }
839
840 /* Update timestamp */
841 tcp->ts_recent = tcp->ts_val;
842
843 /* Mark ACK as pending */
844 tcp->flags |= TCP_ACK_PENDING;
845 }
846
847 /**
848 * Handle TCP received SYN
849 *
850 * @v tcp TCP connection
851 * @v seq SEQ value (in host-endian order)
852 * @v options TCP options
853 * @ret rc Return status code
854 */
855 static int tcp_rx_syn ( struct tcp_connection *tcp, uint32_t seq,
856 struct tcp_options *options ) {
857
858 /* Synchronise sequence numbers on first SYN */
859 if ( ! ( tcp->tcp_state & TCP_STATE_RCVD ( TCP_SYN ) ) ) {
860 tcp->rcv_ack = seq;
861 if ( options->tsopt )
862 tcp->flags |= TCP_TS_ENABLED;
863 if ( options->wsopt ) {
864 tcp->snd_win_scale = options->wsopt->scale;
865 tcp->rcv_win_scale = TCP_RX_WINDOW_SCALE;
866 }
867 }
868
869 /* Ignore duplicate SYN */
870 if ( seq != tcp->rcv_ack )
871 return 0;
872
873 /* Acknowledge SYN */
874 tcp_rx_seq ( tcp, 1 );
875
876 /* Mark SYN as received and start sending ACKs with each packet */
877 tcp->tcp_state |= ( TCP_STATE_SENT ( TCP_ACK ) |
878 TCP_STATE_RCVD ( TCP_SYN ) );
879
880 return 0;
881 }
882
883 /**
884 * Handle TCP received ACK
885 *
886 * @v tcp TCP connection
887 * @v ack ACK value (in host-endian order)
888 * @v win WIN value (in host-endian order)
889 * @ret rc Return status code
890 */
891 static int tcp_rx_ack ( struct tcp_connection *tcp, uint32_t ack,
892 uint32_t win ) {
893 uint32_t ack_len = ( ack - tcp->snd_seq );
894 size_t len;
895 unsigned int acked_flags;
896
897 /* Check for out-of-range or old duplicate ACKs */
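/* (Unsigned sequence arithmetic means that an old duplicate ACK,
 * i.e. one for data before SND.UNA, wraps to a large ack_len and
 * is also caught by this test.)
 */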
898 if ( ack_len > tcp->snd_sent ) {
899 DBGC ( tcp, "TCP %p received ACK for %08x..%08x, "
900 "sent only %08x..%08x\n", tcp, tcp->snd_seq,
901 ( tcp->snd_seq + ack_len ), tcp->snd_seq,
902 ( tcp->snd_seq + tcp->snd_sent ) );
903
904 if ( TCP_HAS_BEEN_ESTABLISHED ( tcp->tcp_state ) ) {
905 /* Just ignore what might be old duplicate ACKs */
906 return 0;
907 } else {
908 /* Send RST if an out-of-range ACK is received
909 * on a not-yet-established connection, as per
910 * RFC 793.
911 */
912 return -EINVAL;
913 }
914 }
915
916 /* Update window size */
917 tcp->snd_win = win;
918
919 /* Ignore ACKs that don't actually acknowledge any new data.
920 * (In particular, do not stop the retransmission timer; this
921 * avoids creating a sorcerer's apprentice syndrome when a
922 * duplicate ACK is received and we still have data in our
923 * transmit queue.)
924 */
925 if ( ack_len == 0 )
926 return 0;
927
928 /* Stop the retransmission timer */
929 stop_timer ( &tcp->timer );
930
931 /* Determine acknowledged flags and data length */
932 len = ack_len;
933 acked_flags = ( TCP_FLAGS_SENDING ( tcp->tcp_state ) &
934 ( TCP_SYN | TCP_FIN ) );
935 if ( acked_flags ) {
936 len--;
937 pending_put ( &tcp->pending_flags );
938 }
939
940 /* Update SEQ and sent counters */
941 tcp->snd_seq = ack;
942 tcp->snd_sent = 0;
943
944 /* Remove any acknowledged data from transmit queue */
945 tcp_process_tx_queue ( tcp, len, NULL, 1 );
946
947 /* Mark SYN/FIN as acknowledged if applicable. */
948 if ( acked_flags )
949 tcp->tcp_state |= TCP_STATE_ACKED ( acked_flags );
950
951 /* Start sending FIN if we've had all possible data ACKed */
952 if ( list_empty ( &tcp->tx_queue ) &&
953 ( tcp->flags & TCP_XFER_CLOSED ) &&
954 ! ( tcp->tcp_state & TCP_STATE_SENT ( TCP_FIN ) ) ) {
955 tcp->tcp_state |= TCP_STATE_SENT ( TCP_FIN );
956 pending_get ( &tcp->pending_flags );
957 }
958
959 return 0;
960 }
961
962 /**
963 * Handle TCP received data
964 *
965 * @v tcp TCP connection
966 * @v seq SEQ value (in host-endian order)
967 * @v iobuf I/O buffer
968 * @ret rc Return status code
969 *
970 * This function takes ownership of the I/O buffer.
971 */
972 static int tcp_rx_data ( struct tcp_connection *tcp, uint32_t seq,
973 struct io_buffer *iobuf ) {
974 uint32_t already_rcvd;
975 uint32_t len;
976 int rc;
977
978 /* Ignore duplicate or out-of-order data */
979 already_rcvd = ( tcp->rcv_ack - seq );
980 len = iob_len ( iobuf );
981 if ( already_rcvd >= len ) {
982 free_iob ( iobuf );
983 return 0;
984 }
985 iob_pull ( iobuf, already_rcvd );
986 len -= already_rcvd;
987
988 /* Acknowledge new data */
989 tcp_rx_seq ( tcp, len );
990
991 /* Deliver data to application */
992 profile_start ( &tcp_xfer_profiler );
993 if ( ( rc = xfer_deliver_iob ( &tcp->xfer, iobuf ) ) != 0 ) {
994 DBGC ( tcp, "TCP %p could not deliver %08x..%08x: %s\n",
995 tcp, seq, ( seq + len ), strerror ( rc ) );
996 return rc;
997 }
998 profile_stop ( &tcp_xfer_profiler );
999
1000 return 0;
1001 }
1002
1003 /**
1004 * Handle TCP received FIN
1005 *
1006 * @v tcp TCP connection
1007 * @v seq SEQ value (in host-endian order)
1008 * @ret rc Return status code
1009 */
1010 static int tcp_rx_fin ( struct tcp_connection *tcp, uint32_t seq ) {
1011
1012 /* Ignore duplicate or out-of-order FIN */
1013 if ( seq != tcp->rcv_ack )
1014 return 0;
1015
1016 /* Acknowledge FIN */
1017 tcp_rx_seq ( tcp, 1 );
1018
1019 /* Mark FIN as received */
1020 tcp->tcp_state |= TCP_STATE_RCVD ( TCP_FIN );
1021
1022 /* Close connection */
1023 tcp_close ( tcp, 0 );
1024
1025 return 0;
1026 }
1027
1028 /**
1029 * Handle TCP received RST
1030 *
1031 * @v tcp TCP connection
1032 * @v seq SEQ value (in host-endian order)
1033 * @ret rc Return status code
1034 */
1035 static int tcp_rx_rst ( struct tcp_connection *tcp, uint32_t seq ) {
1036
1037 /* Accept RST only if it falls within the window. If we have
1038 * not yet received a SYN, then we have no window to test
1039 * against, so fall back to checking that our SYN has been
1040 * ACKed.
1041 */
1042 if ( tcp->tcp_state & TCP_STATE_RCVD ( TCP_SYN ) ) {
1043 if ( ! tcp_in_window ( seq, tcp->rcv_ack, tcp->rcv_win ) )
1044 return 0;
1045 } else {
1046 if ( ! ( tcp->tcp_state & TCP_STATE_ACKED ( TCP_SYN ) ) )
1047 return 0;
1048 }
1049
1050 /* Abort connection */
1051 tcp->tcp_state = TCP_CLOSED;
1052 tcp_dump_state ( tcp );
1053 tcp_close ( tcp, -ECONNRESET );
1054
1055 DBGC ( tcp, "TCP %p connection reset by peer\n", tcp );
1056 return -ECONNRESET;
1057 }
1058
1059 /**
1060 * Enqueue received TCP packet
1061 *
1062 * @v tcp TCP connection
1063 * @v seq SEQ value (in host-endian order)
1064 * @v flags TCP flags
1065 * @v iobuf I/O buffer
1066 */
1067 static void tcp_rx_enqueue ( struct tcp_connection *tcp, uint32_t seq,
1068 uint8_t flags, struct io_buffer *iobuf ) {
1069 struct tcp_rx_queued_header *tcpqhdr;
1070 struct io_buffer *queued;
1071 size_t len;
1072 uint32_t seq_len;
1073
1074 /* Calculate remaining flags and sequence length. Note that
1075 * SYN, if present, has already been processed by this point.
1076 */
1077 flags &= TCP_FIN;
1078 len = iob_len ( iobuf );
1079 seq_len = ( len + ( flags ? 1 : 0 ) );
1080
1081 /* Discard immediately (to save memory) if:
1082 *
1083 * a) we have not yet received a SYN (and so have no defined
1084 * receive window), or
1085 * b) the packet lies entirely outside the receive window, or
1086 * c) there is no further content to process.
1087 */
1088 if ( ( ! ( tcp->tcp_state & TCP_STATE_RCVD ( TCP_SYN ) ) ) ||
1089 ( tcp_cmp ( seq, tcp->rcv_ack + tcp->rcv_win ) >= 0 ) ||
1090 ( tcp_cmp ( seq + seq_len, tcp->rcv_ack ) < 0 ) ||
1091 ( seq_len == 0 ) ) {
1092 free_iob ( iobuf );
1093 return;
1094 }
1095
1096 /* Add internal header */
1097 tcpqhdr = iob_push ( iobuf, sizeof ( *tcpqhdr ) );
1098 tcpqhdr->seq = seq;
1099 tcpqhdr->flags = flags;
1100
1101 /* Add to RX queue */
1102 list_for_each_entry ( queued, &tcp->rx_queue, list ) {
1103 tcpqhdr = queued->data;
1104 if ( tcp_cmp ( seq, tcpqhdr->seq ) < 0 )
1105 break;
1106 }
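/* Insert before the first queued packet with a later SEQ, or at
 * the end of the queue if no such packet exists, so that the
 * queue stays ordered by SEQ.
 */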
1107 list_add_tail ( &iobuf->list, &queued->list );
1108 }
1109
1110 /**
1111 * Process receive queue
1112 *
1113 * @v tcp TCP connection
1114 */
1115 static void tcp_process_rx_queue ( struct tcp_connection *tcp ) {
1116 struct io_buffer *iobuf;
1117 struct tcp_rx_queued_header *tcpqhdr;
1118 uint32_t seq;
1119 unsigned int flags;
1120 size_t len;
1121
1122 /* Process all applicable received buffers. Note that we
1123 * cannot use list_for_each_entry() to iterate over the RX
1124 * queue, since tcp_discard() may remove packets from the RX
1125 * queue while we are processing.
1126 */
1127 while ( ( iobuf = list_first_entry ( &tcp->rx_queue, struct io_buffer,
1128 list ) ) ) {
1129
1130 /* Stop processing when we hit the first gap */
1131 tcpqhdr = iobuf->data;
1132 if ( tcp_cmp ( tcpqhdr->seq, tcp->rcv_ack ) > 0 )
1133 break;
1134
1135 /* Strip internal header and remove from RX queue */
1136 list_del ( &iobuf->list );
1137 seq = tcpqhdr->seq;
1138 flags = tcpqhdr->flags;
1139 iob_pull ( iobuf, sizeof ( *tcpqhdr ) );
1140 len = iob_len ( iobuf );
1141
1142 /* Handle new data, if any */
1143 tcp_rx_data ( tcp, seq, iob_disown ( iobuf ) );
1144 seq += len;
1145
1146 /* Handle FIN, if present */
1147 if ( flags & TCP_FIN ) {
1148 tcp_rx_fin ( tcp, seq );
1149 seq++;
1150 }
1151 }
1152 }
1153
1154 /**
1155 * Process received packet
1156 *
1157 * @v iobuf I/O buffer
1158 * @v netdev Network device
1159 * @v st_src Partially-filled source address
1160 * @v st_dest Partially-filled destination address
1161 * @v pshdr_csum Pseudo-header checksum
1162 * @ret rc Return status code
1163 */
1164 static int tcp_rx ( struct io_buffer *iobuf,
1165 struct net_device *netdev __unused,
1166 struct sockaddr_tcpip *st_src,
1167 struct sockaddr_tcpip *st_dest __unused,
1168 uint16_t pshdr_csum ) {
1169 struct tcp_header *tcphdr = iobuf->data;
1170 struct tcp_connection *tcp;
1171 struct tcp_options options;
1172 size_t hlen;
1173 uint16_t csum;
1174 uint32_t seq;
1175 uint32_t ack;
1176 uint16_t raw_win;
1177 uint32_t win;
1178 unsigned int flags;
1179 size_t len;
1180 uint32_t seq_len;
1181 size_t old_xfer_window;
1182 int rc;
1183
1184 /* Start profiling */
1185 profile_start ( &tcp_rx_profiler );
1186
1187 /* Sanity check packet */
1188 if ( iob_len ( iobuf ) < sizeof ( *tcphdr ) ) {
1189 DBG ( "TCP packet too short at %zd bytes (min %zd bytes)\n",
1190 iob_len ( iobuf ), sizeof ( *tcphdr ) );
1191 rc = -EINVAL;
1192 goto discard;
1193 }
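/* The data offset occupies the top four bits of the hlen field
 * and is expressed in 32-bit words; convert it to a byte count.
 */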
1194 hlen = ( ( tcphdr->hlen & TCP_MASK_HLEN ) / 16 ) * 4;
1195 if ( hlen < sizeof ( *tcphdr ) ) {
1196 DBG ( "TCP header too short at %zd bytes (min %zd bytes)\n",
1197 hlen, sizeof ( *tcphdr ) );
1198 rc = -EINVAL;
1199 goto discard;
1200 }
1201 if ( hlen > iob_len ( iobuf ) ) {
1202 DBG ( "TCP header too long at %zd bytes (max %zd bytes)\n",
1203 hlen, iob_len ( iobuf ) );
1204 rc = -EINVAL;
1205 goto discard;
1206 }
1207 csum = tcpip_continue_chksum ( pshdr_csum, iobuf->data,
1208 iob_len ( iobuf ) );
1209 if ( csum != 0 ) {
1210 DBG ( "TCP checksum incorrect (is %04x including checksum "
1211 "field, should be 0000)\n", csum );
1212 rc = -EINVAL;
1213 goto discard;
1214 }
1215
1216 /* Parse parameters from header and strip header */
1217 tcp = tcp_demux ( ntohs ( tcphdr->dest ) );
1218 seq = ntohl ( tcphdr->seq );
1219 ack = ntohl ( tcphdr->ack );
1220 raw_win = ntohs ( tcphdr->win );
1221 flags = tcphdr->flags;
1222 tcp_rx_opts ( tcp, ( ( ( void * ) tcphdr ) + sizeof ( *tcphdr ) ),
1223 ( hlen - sizeof ( *tcphdr ) ), &options );
1224 if ( tcp && options.tsopt )
1225 tcp->ts_val = ntohl ( options.tsopt->tsval );
1226 iob_pull ( iobuf, hlen );
1227 len = iob_len ( iobuf );
1228 seq_len = ( len + ( ( flags & TCP_SYN ) ? 1 : 0 ) +
1229 ( ( flags & TCP_FIN ) ? 1 : 0 ) );
1230
1231 /* Dump header */
1232 DBGC2 ( tcp, "TCP %p RX %d<-%d %08x %08x..%08x %4zd",
1233 tcp, ntohs ( tcphdr->dest ), ntohs ( tcphdr->src ),
1234 ntohl ( tcphdr->ack ), ntohl ( tcphdr->seq ),
1235 ( ntohl ( tcphdr->seq ) + seq_len ), len );
1236 tcp_dump_flags ( tcp, tcphdr->flags );
1237 DBGC2 ( tcp, "\n" );
1238
1239 /* If no connection was found, silently drop packet */
1240 if ( ! tcp ) {
1241 rc = -ENOTCONN;
1242 goto discard;
1243 }
1244
1245 /* Record old data-transfer window */
1246 old_xfer_window = tcp_xfer_window ( tcp );
1247
1248 /* Handle ACK, if present */
1249 if ( flags & TCP_ACK ) {
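/* Scale up the peer's advertised 16-bit window using the send
 * window scale negotiated at connection setup (zero if window
 * scaling is not in use).
 */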
1250 win = ( raw_win << tcp->snd_win_scale );
1251 if ( ( rc = tcp_rx_ack ( tcp, ack, win ) ) != 0 ) {
1252 tcp_xmit_reset ( tcp, st_src, tcphdr );
1253 goto discard;
1254 }
1255 }
1256
1257 /* Force an ACK if this packet is out of order */
1258 if ( ( tcp->tcp_state & TCP_STATE_RCVD ( TCP_SYN ) ) &&
1259 ( seq != tcp->rcv_ack ) ) {
1260 tcp->flags |= TCP_ACK_PENDING;
1261 }
1262
1263 /* Handle SYN, if present */
1264 if ( flags & TCP_SYN ) {
1265 tcp_rx_syn ( tcp, seq, &options );
1266 seq++;
1267 }
1268
1269 /* Handle RST, if present */
1270 if ( flags & TCP_RST ) {
1271 if ( ( rc = tcp_rx_rst ( tcp, seq ) ) != 0 )
1272 goto discard;
1273 }
1274
1275 /* Enqueue received data */
1276 tcp_rx_enqueue ( tcp, seq, flags, iob_disown ( iobuf ) );
1277
1278 /* Process receive queue */
1279 tcp_process_rx_queue ( tcp );
1280
1281 /* Dump out any state change as a result of the received packet */
1282 tcp_dump_state ( tcp );
1283
1284 /* Schedule transmission of ACK (and any pending data). If we
1285 * have received any out-of-order packets (i.e. if the receive
1286 * queue remains non-empty after processing) then send the ACK
1287 * immediately in order to trigger Fast Retransmission.
1288 */
1289 if ( list_empty ( &tcp->rx_queue ) ) {
1290 process_add ( &tcp->process );
1291 } else {
1292 tcp_xmit ( tcp );
1293 }
1294
1295 /* If this packet was the last we expect to receive, set up
1296 * timer to expire and cause the connection to be freed.
1297 */
1298 if ( TCP_CLOSED_GRACEFULLY ( tcp->tcp_state ) ) {
1299 stop_timer ( &tcp->wait );
1300 start_timer_fixed ( &tcp->wait, ( 2 * TCP_MSL ) );
1301 }
1302
1303 /* Notify application if window has changed */
1304 if ( tcp_xfer_window ( tcp ) != old_xfer_window )
1305 xfer_window_changed ( &tcp->xfer );
1306
1307 profile_stop ( &tcp_rx_profiler );
1308 return 0;
1309
1310 discard:
1311 /* Free received packet */
1312 free_iob ( iobuf );
1313 return rc;
1314 }
1315
1316 /** TCP protocol */
1317 struct tcpip_protocol tcp_protocol __tcpip_protocol = {
1318 .name = "TCP",
1319 .rx = tcp_rx,
1320 .tcpip_proto = IP_TCP,
1321 };
1322
1323 /**
1324 * Discard some cached TCP data
1325 *
1326 * @ret discarded Number of cached items discarded
1327 */
1328 static unsigned int tcp_discard ( void ) {
1329 struct tcp_connection *tcp;
1330 struct io_buffer *iobuf;
1331 struct tcp_rx_queued_header *tcpqhdr;
1332 uint32_t max_win;
1333 unsigned int discarded = 0;
1334
1335 /* Try to drop one queued RX packet from each connection */
1336 list_for_each_entry ( tcp, &tcp_conns, list ) {
1337 list_for_each_entry_reverse ( iobuf, &tcp->rx_queue, list ) {
1338
1339 /* Limit window to prevent future discards */
1340 tcpqhdr = iobuf->data;
1341 max_win = ( tcpqhdr->seq - tcp->rcv_ack );
1342 if ( max_win < tcp->max_rcv_win ) {
1343 DBGC ( tcp, "TCP %p reducing maximum window "
1344 "from %d to %d\n",
1345 tcp, tcp->max_rcv_win, max_win );
1346 tcp->max_rcv_win = max_win;
1347 }
1348
1349 /* Remove packet from queue */
1350 list_del ( &iobuf->list );
1351 free_iob ( iobuf );
1352
1353 /* Report discard */
1354 discarded++;
1355 break;
1356 }
1357 }
1358
1359 return discarded;
1360 }
1361
1362 /** TCP cache discarder */
1363 struct cache_discarder tcp_discarder __cache_discarder ( CACHE_NORMAL ) = {
1364 .discard = tcp_discard,
1365 };
1366
1367 /**
1368 * Shut down all TCP connections
1369 *
1370 */
1371 static void tcp_shutdown ( int booting __unused ) {
1372 struct tcp_connection *tcp;
1373
1374 while ( ( tcp = list_first_entry ( &tcp_conns, struct tcp_connection,
1375 list ) ) != NULL ) {
1376 tcp->tcp_state = TCP_CLOSED;
1377 tcp_dump_state ( tcp );
1378 tcp_close ( tcp, -ECANCELED );
1379 }
1380 }
1381
1382 /** TCP shutdown function */
1383 struct startup_fn tcp_startup_fn __startup_fn ( STARTUP_EARLY ) = {
1384 .shutdown = tcp_shutdown,
1385 };
1386
1387 /***************************************************************************
1388 *
1389 * Data transfer interface
1390 *
1391 ***************************************************************************
1392 */
1393
1394 /**
1395 * Close interface
1396 *
1397 * @v tcp TCP connection
1398 * @v rc Reason for close
1399 */
1400 static void tcp_xfer_close ( struct tcp_connection *tcp, int rc ) {
1401
1402 /* Close data transfer interface */
1403 tcp_close ( tcp, rc );
1404
1405 /* Transmit FIN, if possible */
1406 tcp_xmit ( tcp );
1407 }
1408
1409 /**
1410 * Deliver datagram as I/O buffer
1411 *
1412 * @v tcp TCP connection
1413 * @v iobuf Datagram I/O buffer
1414 * @v meta Data transfer metadata
1415 * @ret rc Return status code
1416 */
1417 static int tcp_xfer_deliver ( struct tcp_connection *tcp,
1418 struct io_buffer *iobuf,
1419 struct xfer_metadata *meta __unused ) {
1420
1421 /* Enqueue packet */
1422 list_add_tail ( &iobuf->list, &tcp->tx_queue );
1423
1424 /* Each enqueued packet is a pending operation */
1425 pending_get ( &tcp->pending_data );
1426
1427 /* Transmit data, if possible */
1428 tcp_xmit ( tcp );
1429
1430 return 0;
1431 }
1432
1433 /** TCP data transfer interface operations */
1434 static struct interface_operation tcp_xfer_operations[] = {
1435 INTF_OP ( xfer_deliver, struct tcp_connection *, tcp_xfer_deliver ),
1436 INTF_OP ( xfer_window, struct tcp_connection *, tcp_xfer_window ),
1437 INTF_OP ( intf_close, struct tcp_connection *, tcp_xfer_close ),
1438 };
1439
1440 /** TCP data transfer interface descriptor */
1441 static struct interface_descriptor tcp_xfer_desc =
1442 INTF_DESC ( struct tcp_connection, xfer, tcp_xfer_operations );
1443
1444 /***************************************************************************
1445 *
1446 * Openers
1447 *
1448 ***************************************************************************
1449 */
1450
1451 /** TCP IPv4 socket opener */
1452 struct socket_opener tcp_ipv4_socket_opener __socket_opener = {
1453 .semantics = TCP_SOCK_STREAM,
1454 .family = AF_INET,
1455 .open = tcp_open,
1456 };
1457
1458 /** TCP IPv6 socket opener */
1459 struct socket_opener tcp_ipv6_socket_opener __socket_opener = {
1460 .semantics = TCP_SOCK_STREAM,
1461 .family = AF_INET6,
1462 .open = tcp_open,
1463 };
1464
1465 /** Linkage hack */
1466 int tcp_sock_stream = TCP_SOCK_STREAM;
1467
1468 /**
1469 * Open TCP URI
1470 *
1471 * @v xfer Data transfer interface
1472 * @v uri URI
1473 * @ret rc Return status code
1474 */
1475 static int tcp_open_uri ( struct interface *xfer, struct uri *uri ) {
1476 struct sockaddr_tcpip peer;
1477
1478 /* Sanity check */
1479 if ( ! uri->host )
1480 return -EINVAL;
1481
1482 memset ( &peer, 0, sizeof ( peer ) );
1483 peer.st_port = htons ( uri_port ( uri, 0 ) );
1484 return xfer_open_named_socket ( xfer, SOCK_STREAM,
1485 ( struct sockaddr * ) &peer,
1486 uri->host, NULL );
1487 }
1488
1489 /** TCP URI opener */
1490 struct uri_opener tcp_uri_opener __uri_opener = {
1491 .scheme = "tcp",
1492 .open = tcp_open_uri,
1493 };
1494