/*
    utcp.c -- Userspace TCP
    Copyright (C) 2014 Guus Sliepen <guus@tinc-vpn.org>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <assert.h>
#include <errno.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/time.h>

#include "utcp_priv.h"
// Compute r = a - b on struct timeval, borrowing from tv_sec when the
// microsecond difference goes negative. Guarded because BSD/glibc
// <sys/time.h> may already provide an identical macro.
#ifndef timersub
#define timersub(a, b, r) do {\
	(r)->tv_sec = (a)->tv_sec - (b)->tv_sec;\
	(r)->tv_usec = (a)->tv_usec - (b)->tv_usec;\
	if((r)->tv_usec < 0)\
		(r)->tv_sec--, (r)->tv_usec += 1000000;\
} while (0)
#endif
// Return the larger of two sizes.
static inline size_t max(size_t a, size_t b) {
	return a > b ? a : b;
}
// printf-style debug logging to stderr.
static void debug(const char *format, ...) {
	va_list ap;
	va_start(ap, format);
	vfprintf(stderr, format, ap);
	va_end(ap);
}
70 static void print_packet(struct utcp *utcp, const char *dir, const void *pkt, size_t len) {
72 if(len < sizeof hdr) {
73 debug("%p %s: short packet (%zu bytes)\n", utcp, dir, len);
77 memcpy(&hdr, pkt, sizeof hdr);
78 fprintf (stderr, "%p %s: len=%zu, src=%u dst=%u seq=%u ack=%u wnd=%u ctl=", utcp, dir, len, hdr.src, hdr.dst, hdr.seq, hdr.ack, hdr.wnd);
88 if(len > sizeof hdr) {
89 uint32_t datalen = len - sizeof hdr;
90 uint8_t *str = malloc((datalen << 1) + 7);
92 debug("out of memory");
95 memcpy(str, " data=", 6);
96 uint8_t *strptr = str + 6;
97 const uint8_t *data = pkt;
98 const uint8_t *dataend = data + datalen;
100 while(data != dataend) {
101 *strptr = (*data >> 4) > 9? (*data >> 4) + 55 : (*data >> 4) + 48;
103 *strptr = (*data & 0xf) > 9? (*data & 0xf) + 55 : (*data & 0xf) + 48;
117 #define print_packet(...)
120 static void set_state(struct utcp_connection *c, enum state state) {
122 if(state == ESTABLISHED)
123 timerclear(&c->conn_timeout);
124 debug("%p new state: %s\n", c->utcp, strstate[state]);
127 static bool fin_wanted(struct utcp_connection *c, uint32_t seq) {
128 if(seq != c->snd.last)
140 static inline void list_connections(struct utcp *utcp) {
141 debug("%p has %d connections:\n", utcp, utcp->nconnections);
142 for(int i = 0; i < utcp->nconnections; i++)
143 debug(" %u -> %u state %s\n", utcp->connections[i]->src, utcp->connections[i]->dst, strstate[utcp->connections[i]->state]);
// Signed difference between two 32-bit sequence numbers, correct across
// wrap-around (a - b wraps modulo 2^32, then is reinterpreted as signed).
static int32_t seqdiff(uint32_t a, uint32_t b) {
	return a - b;
}
151 // TODO: convert to ringbuffers to avoid memmove() operations.
153 // Store data into the buffer
154 static ssize_t buffer_put_at(struct buffer *buf, size_t offset, const void *data, size_t len) {
155 if(buf->maxsize <= buf->used)
158 debug("buffer_put_at %zu %zu %zu\n", buf->used, offset, len);
160 size_t required = offset + len;
161 if(required > buf->maxsize) {
162 if(offset >= buf->maxsize)
165 len = buf->maxsize - offset;
166 required = buf->maxsize;
169 if(required > buf->size) {
170 size_t newsize = buf->size;
176 } while(newsize < buf->used + len);
178 if(newsize > buf->maxsize)
179 newsize = buf->maxsize;
180 char *newdata = realloc(buf->data, newsize);
187 memcpy(buf->data + offset, data, len);
188 if(required > buf->used)
189 buf->used = required;
193 static ssize_t buffer_put(struct buffer *buf, const void *data, size_t len) {
194 return buffer_put_at(buf, buf->used, data, len);
197 // Get data from the buffer. data can be NULL.
198 static ssize_t buffer_get(struct buffer *buf, void *data, size_t len) {
202 memcpy(data, buf->data, len);
204 memmove(buf->data, buf->data + len, buf->used - len);
209 // Copy data from the buffer without removing it.
210 static ssize_t buffer_copy(struct buffer *buf, void *data, size_t offset, size_t len) {
211 if(offset >= buf->used)
213 if(offset + len > buf->used)
214 len = buf->used - offset;
215 memcpy(data, buf->data + offset, len);
219 static bool buffer_init(struct buffer *buf, uint32_t len, uint32_t maxlen) {
220 memset(buf, 0, sizeof *buf);
222 buf->data = malloc(len);
227 buf->maxsize = maxlen;
231 static void buffer_exit(struct buffer *buf) {
233 memset(buf, 0, sizeof *buf);
236 static uint32_t buffer_free(const struct buffer *buf) {
237 return buf->maxsize - buf->used;
240 // Connections are stored in a sorted list.
241 // This gives O(log(N)) lookup time, O(N log(N)) insertion time and O(N) deletion time.
243 static int compare(const void *va, const void *vb) {
246 const struct utcp_connection *a = *(struct utcp_connection **)va;
247 const struct utcp_connection *b = *(struct utcp_connection **)vb;
250 assert(a->src && b->src);
252 int c = (int)a->src - (int)b->src;
255 c = (int)a->dst - (int)b->dst;
259 static struct utcp_connection *find_connection(const struct utcp *utcp, uint16_t src, uint16_t dst) {
260 if(!utcp->nconnections)
262 struct utcp_connection key = {
266 struct utcp_connection **match = bsearch(&keyp, utcp->connections, utcp->nconnections, sizeof *utcp->connections, compare);
267 return match ? *match : NULL;
270 static void free_connection(struct utcp_connection *c) {
271 struct utcp *utcp = c->utcp;
272 struct utcp_connection **cp = bsearch(&c, utcp->connections, utcp->nconnections, sizeof *utcp->connections, compare);
276 int i = cp - utcp->connections;
277 memmove(cp, cp + 1, (utcp->nconnections - i - 1) * sizeof *cp);
278 utcp->nconnections--;
280 buffer_exit(&c->rcvbuf);
281 buffer_exit(&c->sndbuf);
285 static struct utcp_connection *allocate_connection(struct utcp *utcp, uint16_t src, uint16_t dst) {
286 // Check whether this combination of src and dst is free
289 if(find_connection(utcp, src, dst)) {
293 } else { // If src == 0, generate a random port number with the high bit set
294 if(utcp->nconnections >= 32767) {
298 src = rand() | 0x8000;
299 while(find_connection(utcp, src, dst))
303 // Allocate memory for the new connection
305 if(utcp->nconnections >= utcp->nallocated) {
306 if(!utcp->nallocated)
307 utcp->nallocated = 4;
309 utcp->nallocated *= 2;
310 struct utcp_connection **new_array = realloc(utcp->connections, utcp->nallocated * sizeof *utcp->connections);
313 utcp->connections = new_array;
316 struct utcp_connection *c = calloc(1, sizeof *c);
320 if(!buffer_init(&c->sndbuf, DEFAULT_SNDBUFSIZE, DEFAULT_MAXSNDBUFSIZE)) {
325 if(!buffer_init(&c->rcvbuf, DEFAULT_RCVBUFSIZE, DEFAULT_MAXRCVBUFSIZE)) {
326 buffer_exit(&c->sndbuf);
331 // Fill in the details
340 c->snd.una = c->snd.iss;
341 c->snd.nxt = c->snd.iss + 1;
342 c->rcv.wnd = utcp->mtu;
343 c->snd.last = c->snd.nxt;
344 c->snd.cwnd = utcp->mtu;
347 // Add it to the sorted list of connections
349 utcp->connections[utcp->nconnections++] = c;
350 qsort(utcp->connections, utcp->nconnections, sizeof *utcp->connections, compare);
355 // Update RTT variables. See RFC 6298.
356 static void update_rtt(struct utcp_connection *c, uint32_t rtt) {
358 debug("invalid rtt\n");
362 struct utcp *utcp = c->utcp;
366 utcp->rttvar = rtt / 2;
367 utcp->rto = rtt + max(2 * rtt, CLOCK_GRANULARITY);
369 utcp->rttvar = (utcp->rttvar * 3 + abs(utcp->srtt - rtt)) / 4;
370 utcp->srtt = (utcp->srtt * 7 + rtt) / 8;
371 utcp->rto = utcp->srtt + max(utcp->rttvar, CLOCK_GRANULARITY);
374 if(utcp->rto > MAX_RTO)
377 debug("rtt %u srtt %u rttvar %u rto %u\n", rtt, utcp->srtt, utcp->rttvar, utcp->rto);
380 static void start_retransmit_timer(struct utcp_connection *c) {
381 gettimeofday(&c->rtrx_timeout, NULL);
382 c->rtrx_timeout.tv_usec += c->utcp->rto;
383 while(c->rtrx_timeout.tv_usec >= 1000000) {
384 c->rtrx_timeout.tv_usec -= 1000000;
385 c->rtrx_timeout.tv_sec++;
387 debug("timeout set to %lu.%06lu (%u)\n", c->rtrx_timeout.tv_sec, c->rtrx_timeout.tv_usec, c->utcp->rto);
390 static void stop_retransmit_timer(struct utcp_connection *c) {
391 timerclear(&c->rtrx_timeout);
392 debug("timeout cleared\n");
395 struct utcp_connection *utcp_connect(struct utcp *utcp, uint16_t dst, utcp_recv_t recv, void *priv) {
396 struct utcp_connection *c = allocate_connection(utcp, 0, dst);
407 hdr.seq = c->snd.iss;
409 hdr.wnd = c->rcv.wnd;
413 set_state(c, SYN_SENT);
415 print_packet(utcp, "send", &hdr, sizeof hdr);
416 utcp->send(utcp, &hdr, sizeof hdr);
418 gettimeofday(&c->conn_timeout, NULL);
419 c->conn_timeout.tv_sec += utcp->timeout;
424 void utcp_accept(struct utcp_connection *c, utcp_recv_t recv, void *priv) {
425 if(c->reapable || c->state != SYN_RECEIVED) {
426 debug("Error: accept() called on invalid connection %p in state %s\n", c, strstate[c->state]);
430 debug("%p accepted, %p %p\n", c, recv, priv);
433 set_state(c, ESTABLISHED);
436 static void ack(struct utcp_connection *c, bool sendatleastone) {
437 int32_t left = seqdiff(c->snd.last, c->snd.nxt);
438 int32_t cwndleft = c->snd.cwnd - seqdiff(c->snd.nxt, c->snd.una);
439 debug("cwndleft = %d\n", cwndleft);
449 if(!left && !sendatleastone)
457 pkt = malloc(sizeof pkt->hdr + c->utcp->mtu);
461 pkt->hdr.src = c->src;
462 pkt->hdr.dst = c->dst;
463 pkt->hdr.ack = c->rcv.nxt;
464 pkt->hdr.wnd = c->snd.wnd;
469 uint32_t seglen = left > c->utcp->mtu ? c->utcp->mtu : left;
470 pkt->hdr.seq = c->snd.nxt;
472 buffer_copy(&c->sndbuf, pkt->data, seqdiff(c->snd.nxt, c->snd.una), seglen);
474 c->snd.nxt += seglen;
477 if(seglen && fin_wanted(c, c->snd.nxt)) {
482 if(!c->rtt_start.tv_sec) {
483 // Start RTT measurement
484 gettimeofday(&c->rtt_start, NULL);
485 c->rtt_seq = pkt->hdr.seq + seglen;
486 debug("Starting RTT measurement, expecting ack %u\n", c->rtt_seq);
489 print_packet(c->utcp, "send", pkt, sizeof pkt->hdr + seglen);
490 c->utcp->send(c->utcp, pkt, sizeof pkt->hdr + seglen);
496 ssize_t utcp_send(struct utcp_connection *c, const void *data, size_t len) {
498 debug("Error: send() called on closed connection %p\n", c);
508 debug("Error: send() called on unconnected connection %p\n", c);
519 debug("Error: send() called on closing connection %p\n", c);
524 // Add data to send buffer
534 len = buffer_put(&c->sndbuf, data, len);
542 if(!timerisset(&c->rtrx_timeout))
543 start_retransmit_timer(c);
547 static void swap_ports(struct hdr *hdr) {
548 uint16_t tmp = hdr->src;
553 static void retransmit(struct utcp_connection *c) {
554 if(c->state == CLOSED || c->snd.last == c->snd.una) {
555 debug("Retransmit() called but nothing to retransmit!\n");
556 stop_retransmit_timer(c);
560 struct utcp *utcp = c->utcp;
567 pkt = malloc(sizeof pkt->hdr + c->utcp->mtu);
571 pkt->hdr.src = c->src;
572 pkt->hdr.dst = c->dst;
573 pkt->hdr.wnd = c->rcv.wnd;
578 // Send our SYN again
579 pkt->hdr.seq = c->snd.iss;
582 print_packet(c->utcp, "rtrx", pkt, sizeof pkt->hdr);
583 utcp->send(utcp, pkt, sizeof pkt->hdr);
588 pkt->hdr.seq = c->snd.nxt;
589 pkt->hdr.ack = c->rcv.nxt;
590 pkt->hdr.ctl = SYN | ACK;
591 print_packet(c->utcp, "rtrx", pkt, sizeof pkt->hdr);
592 utcp->send(utcp, pkt, sizeof pkt->hdr);
600 // Send unacked data again.
601 pkt->hdr.seq = c->snd.una;
602 pkt->hdr.ack = c->rcv.nxt;
604 uint32_t len = seqdiff(c->snd.last, c->snd.una);
607 if(fin_wanted(c, c->snd.una + len)) {
611 c->snd.nxt = c->snd.una + len;
612 c->snd.cwnd = utcp->mtu; // reduce cwnd on retransmit
613 buffer_copy(&c->sndbuf, pkt->data, 0, len);
614 print_packet(c->utcp, "rtrx", pkt, sizeof pkt->hdr + len);
615 utcp->send(utcp, pkt, sizeof pkt->hdr + len);
622 // We shouldn't need to retransmit anything in this state.
626 stop_retransmit_timer(c);
630 start_retransmit_timer(c);
632 if(utcp->rto > MAX_RTO)
634 c->rtt_start.tv_sec = 0; // invalidate RTT timer
640 /* Update receive buffer and SACK entries after consuming data.
644 * |.....0000..1111111111.....22222......3333|
647 * 0..3 represent the SACK entries. The ^ indicates up to which point we want
648 * to remove data from the receive buffer. The idea is to substract "len"
649 * from the offset of all the SACK entries, and then remove/cut down entries
650 * that are shifted to before the start of the receive buffer.
652 * There are three cases:
653 * - the SACK entry is after ^, in that case just change the offset.
654 * - the SACK entry starts before and ends after ^, so we have to
655 * change both its offset and size.
656 * - the SACK entry is completely before ^, in that case delete it.
658 static void sack_consume(struct utcp_connection *c, size_t len) {
659 debug("sack_consume %zu\n", len);
660 if(len > c->rcvbuf.used)
663 buffer_get(&c->rcvbuf, NULL, len);
665 for(int i = 0; i < NSACKS && c->sacks[i].len; ) {
666 if(len < c->sacks[i].offset) {
667 c->sacks[i].offset -= len;
669 } else if(len < c->sacks[i].offset + c->sacks[i].len) {
670 c->sacks[i].len -= len - c->sacks[i].offset;
671 c->sacks[i].offset = 0;
675 memmove(&c->sacks[i], &c->sacks[i + 1], (NSACKS - 1 - i) * sizeof c->sacks[i]);
676 c->sacks[NSACKS - 1].len = 0;
684 for(int i = 0; i < NSACKS && c->sacks[i].len; i++)
685 debug("SACK[%d] offset %u len %u\n", i, c->sacks[i].offset, c->sacks[i].len);
688 static void handle_out_of_order(struct utcp_connection *c, uint32_t offset, const void *data, size_t len) {
689 debug("out of order packet, offset %u\n", offset);
690 // Packet loss or reordering occured. Store the data in the buffer.
691 ssize_t rxd = buffer_put_at(&c->rcvbuf, offset, data, len);
695 // Make note of where we put it.
696 for(int i = 0; i < NSACKS; i++) {
697 if(!c->sacks[i].len) { // nothing to merge, add new entry
698 debug("New SACK entry %d\n", i);
699 c->sacks[i].offset = offset;
700 c->sacks[i].len = rxd;
702 } else if(offset < c->sacks[i].offset) {
703 if(offset + rxd < c->sacks[i].offset) { // insert before
704 if(!c->sacks[NSACKS - 1].len) { // only if room left
705 debug("Insert SACK entry at %d\n", i);
706 memmove(&c->sacks[i + 1], &c->sacks[i], (NSACKS - i - 1) * sizeof c->sacks[i]);
707 c->sacks[i].offset = offset;
708 c->sacks[i].len = rxd;
710 debug("SACK entries full, dropping packet\n");
714 debug("Merge with start of SACK entry at %d\n", i);
715 c->sacks[i].offset = offset;
718 } else if(offset <= c->sacks[i].offset + c->sacks[i].len) {
719 if(offset + rxd > c->sacks[i].offset + c->sacks[i].len) { // merge
720 debug("Merge with end of SACK entry at %d\n", i);
721 c->sacks[i].len = offset + rxd - c->sacks[i].offset;
722 // TODO: handle potential merge with next entry
728 for(int i = 0; i < NSACKS && c->sacks[i].len; i++)
729 debug("SACK[%d] offset %u len %u\n", i, c->sacks[i].offset, c->sacks[i].len);
732 static void handle_in_order(struct utcp_connection *c, const void *data, size_t len) {
733 // Check if we can process out-of-order data now.
734 if(c->sacks[0].len && len >= c->sacks[0].offset) { // TODO: handle overlap with second SACK
735 debug("incoming packet len %zu connected with SACK at %u\n", len, c->sacks[0].offset);
736 buffer_put_at(&c->rcvbuf, 0, data, len); // TODO: handle return value
737 len = max(len, c->sacks[0].offset + c->sacks[0].len);
738 data = c->rcvbuf.data;
742 ssize_t rxd = c->recv(c, data, len);
744 // TODO: handle the application not accepting all data.
750 sack_consume(c, len);
756 static void handle_incoming_data(struct utcp_connection *c, uint32_t seq, const void *data, size_t len) {
757 uint32_t offset = seqdiff(seq, c->rcv.nxt);
758 if(offset + len > c->rcvbuf.maxsize)
762 handle_out_of_order(c, offset, data, len);
764 handle_in_order(c, data, len);
768 ssize_t utcp_recv(struct utcp *utcp, const void *data, size_t len) {
782 print_packet(utcp, "recv", data, len);
784 // Drop packets smaller than the header
787 if(len < sizeof hdr) {
792 // Make a copy from the potentially unaligned data to a struct hdr
794 memcpy(&hdr, data, sizeof hdr);
798 // Drop packets with an unknown CTL flag
800 if(hdr.ctl & ~(SYN | ACK | RST | FIN)) {
805 // Try to match the packet to an existing connection
807 struct utcp_connection *c = find_connection(utcp, hdr.dst, hdr.src);
809 // Is it for a new connection?
812 // Ignore RST packets
817 // Is it a SYN packet and are we LISTENing?
819 if(hdr.ctl & SYN && !(hdr.ctl & ACK) && utcp->accept) {
820 // If we don't want to accept it, send a RST back
821 if((utcp->pre_accept && !utcp->pre_accept(utcp, hdr.dst))) {
826 // Try to allocate memory, otherwise send a RST back
827 c = allocate_connection(utcp, hdr.dst, hdr.src);
833 // Return SYN+ACK, go to SYN_RECEIVED state
834 c->snd.wnd = hdr.wnd;
835 c->rcv.irs = hdr.seq;
836 c->rcv.nxt = c->rcv.irs + 1;
837 set_state(c, SYN_RECEIVED);
841 hdr.ack = c->rcv.irs + 1;
842 hdr.seq = c->snd.iss;
844 print_packet(c->utcp, "send", &hdr, sizeof hdr);
845 utcp->send(utcp, &hdr, sizeof hdr);
847 // No, we don't want your packets, send a RST back
855 debug("%p state %s\n", c->utcp, strstate[c->state]);
857 // In case this is for a CLOSED connection, ignore the packet.
858 // TODO: make it so incoming packets can never match a CLOSED connection.
860 if(c->state == CLOSED)
863 // It is for an existing connection.
865 uint32_t prevrcvnxt = c->rcv.nxt;
867 // 1. Drop invalid packets.
869 // 1a. Drop packets that should not happen in our current state.
889 // 1b. Drop packets with a sequence number not in our receive window.
893 if(c->state == SYN_SENT)
896 acceptable = seqdiff(hdr.seq, c->rcv.nxt) >= 0;
898 int32_t rcv_offset = seqdiff(hdr.seq, c->rcv.nxt);
900 // cut already accepted front overlapping
902 acceptable = rcv_offset + len >= 0;
909 acceptable = seqdiff(hdr.seq, c->rcv.nxt) >= 0 && seqdiff(hdr.seq, c->rcv.nxt) + len <= c->rcvbuf.maxsize;
913 debug("Packet not acceptable, %u <= %u + %zu < %u\n", c->rcv.nxt, hdr.seq, len, c->rcv.nxt + c->rcvbuf.maxsize);
914 // Ignore unacceptable RST packets.
917 // Otherwise, send an ACK back in the hope things improve.
922 c->snd.wnd = hdr.wnd; // TODO: move below
924 // 1c. Drop packets with an invalid ACK.
925 // ackno should not roll back, and it should also not be bigger than what we ever could have sent
926 // (= snd.una + c->sndbuf.used).
928 if(hdr.ctl & ACK && (seqdiff(hdr.ack, c->snd.last) > 0 || seqdiff(hdr.ack, c->snd.una) < 0)) {
929 debug("Packet ack seqno out of range, %u <= %u < %u\n", c->snd.una, hdr.ack, c->snd.una + c->sndbuf.used);
930 // Ignore unacceptable RST packets.
936 // 2. Handle RST packets
943 // The peer has refused our connection.
944 set_state(c, CLOSED);
945 errno = ECONNREFUSED;
952 // We haven't told the application about this connection yet. Silently delete.
961 // The peer has aborted our connection.
962 set_state(c, CLOSED);
972 // As far as the application is concerned, the connection has already been closed.
973 // If it has called utcp_close() already, we can immediately free this connection.
978 // Otherwise, immediately move to the CLOSED state.
979 set_state(c, CLOSED);
989 // 3. Advance snd.una
991 uint32_t advanced = seqdiff(hdr.ack, c->snd.una);
992 prevrcvnxt = c->rcv.nxt;
996 if(c->rtt_start.tv_sec) {
997 if(c->rtt_seq == hdr.ack) {
998 struct timeval now, diff;
999 gettimeofday(&now, NULL);
1000 timersub(&now, &c->rtt_start, &diff);
1001 update_rtt(c, diff.tv_sec * 1000000 + diff.tv_usec);
1002 c->rtt_start.tv_sec = 0;
1003 } else if(c->rtt_seq < hdr.ack) {
1004 debug("Cancelling RTT measurement: %u < %u\n", c->rtt_seq, hdr.ack);
1005 c->rtt_start.tv_sec = 0;
1009 int32_t data_acked = advanced;
1016 // TODO: handle FIN as well.
1021 assert(data_acked >= 0);
1023 int32_t bufused = seqdiff(c->snd.last, c->snd.una);
1024 assert(data_acked <= bufused);
1027 buffer_get(&c->sndbuf, NULL, data_acked);
1029 // Also advance snd.nxt if possible
1030 if(seqdiff(c->snd.nxt, hdr.ack) < 0)
1031 c->snd.nxt = hdr.ack;
1033 c->snd.una = hdr.ack;
1036 c->snd.cwnd += utcp->mtu;
1037 if(c->snd.cwnd > c->sndbuf.maxsize)
1038 c->snd.cwnd = c->sndbuf.maxsize;
1040 // Check if we have sent a FIN that is now ACKed.
1043 if(c->snd.una == c->snd.last)
1044 set_state(c, FIN_WAIT_2);
1047 if(c->snd.una == c->snd.last) {
1048 gettimeofday(&c->conn_timeout, NULL);
1049 c->conn_timeout.tv_sec += 60;
1050 set_state(c, TIME_WAIT);
1059 if(c->dupack == 3) {
1060 debug("Triplicate ACK\n");
1061 //TODO: Resend one packet and go to fast recovery mode. See RFC 6582.
1062 //We do a very simple variant here; reset the nxt pointer to the last acknowledged packet from the peer.
1063 //Reset the congestion window so we wait for ACKs.
1064 c->snd.nxt = c->snd.una;
1065 c->snd.cwnd = utcp->mtu;
1066 start_retransmit_timer(c);
1074 timerclear(&c->conn_timeout); // It will be set anew in utcp_timeout() if c->snd.una != c->snd.nxt.
1075 if(c->snd.una == c->snd.last)
1076 stop_retransmit_timer(c);
1078 start_retransmit_timer(c);
1081 // 5. Process SYN stuff
1086 // This is a SYNACK. It should always have ACKed the SYN.
1089 c->rcv.irs = hdr.seq;
1090 c->rcv.nxt = hdr.seq;
1091 set_state(c, ESTABLISHED);
1092 // TODO: notify application of this somehow.
1102 // Ehm, no. We should never receive a second SYN.
1111 // SYN counts as one sequence number
1115 // 6. Process new data
1117 if(c->state == SYN_RECEIVED) {
1118 // This is the ACK after the SYNACK. It should always have ACKed the SYNACK.
1122 // Are we still LISTENing?
1124 utcp->accept(c, c->src);
1126 if(c->state != ESTABLISHED) {
1127 set_state(c, CLOSED);
1137 // This should never happen.
1150 // Ehm no, We should never receive more data after a FIN.
1159 handle_incoming_data(c, hdr.seq, data, len);
1162 // 7. Process FIN stuff
1164 if((hdr.ctl & FIN) && hdr.seq + len == c->rcv.nxt) {
1168 // This should never happen.
1174 set_state(c, CLOSE_WAIT);
1177 set_state(c, CLOSING);
1180 gettimeofday(&c->conn_timeout, NULL);
1181 c->conn_timeout.tv_sec += 60;
1182 set_state(c, TIME_WAIT);
1188 // Ehm, no. We should never receive a second FIN.
1197 // FIN counts as one sequence number
1201 // Inform the application that the peer closed the connection.
1204 c->recv(c, NULL, 0);
1208 // Now we send something back if:
1209 // - we advanced rcv.nxt (ie, we got some data that needs to be ACKed)
1210 // -> sendatleastone = true
1211 // - or we got an ack, so we should maybe send a bit more data
1212 // -> sendatleastone = false
1214 ack(c, len || prevrcvnxt != c->rcv.nxt);
1224 hdr.ack = hdr.seq + len;
1226 hdr.ctl = RST | ACK;
1228 print_packet(utcp, "send", &hdr, sizeof hdr);
1229 utcp->send(utcp, &hdr, sizeof hdr);
1234 int utcp_shutdown(struct utcp_connection *c, int dir) {
1235 debug("%p shutdown %d at %u\n", c ? c->utcp : NULL, dir, c ? c->snd.last : 0);
1242 debug("Error: shutdown() called on closed connection %p\n", c);
1247 if(!(dir == UTCP_SHUT_RD || dir == UTCP_SHUT_WR || dir == UTCP_SHUT_RDWR)) {
1252 // TCP does not have a provision for stopping incoming packets.
1253 // The best we can do is to just ignore them.
1254 if(dir == UTCP_SHUT_RD || dir == UTCP_SHUT_RDWR)
1257 // The rest of the code deals with shutting down writes.
1258 if(dir == UTCP_SHUT_RD)
1268 set_state(c, CLOSED);
1273 set_state(c, FIN_WAIT_1);
1279 set_state(c, CLOSING);
1291 if(!timerisset(&c->rtrx_timeout))
1292 start_retransmit_timer(c);
1296 int utcp_close(struct utcp_connection *c) {
1297 if(utcp_shutdown(c, SHUT_RDWR))
1305 int utcp_abort(struct utcp_connection *c) {
1312 debug("Error: abort() called on closed connection %p\n", c);
1329 set_state(c, CLOSED);
1337 set_state(c, CLOSED);
1347 hdr.seq = c->snd.nxt;
1352 print_packet(c->utcp, "send", &hdr, sizeof hdr);
1353 c->utcp->send(c->utcp, &hdr, sizeof hdr);
1358 * One call to this function will loop through all connections,
1359 * checking if something needs to be resent or not.
1360 * The return value is the time to the next timeout in milliseconds,
1361 * or maybe a negative value if the timeout is infinite.
1363 struct timeval utcp_timeout(struct utcp *utcp) {
1365 gettimeofday(&now, NULL);
1366 struct timeval next = {now.tv_sec + 3600, now.tv_usec};
1368 for(int i = 0; i < utcp->nconnections; i++) {
1369 struct utcp_connection *c = utcp->connections[i];
1373 // delete connections that have been utcp_close()d.
1374 if(c->state == CLOSED) {
1376 debug("Reaping %p\n", c);
1383 if(timerisset(&c->conn_timeout) && timercmp(&c->conn_timeout, &now, <)) {
1387 c->recv(c, NULL, 0);
1391 if(timerisset(&c->rtrx_timeout) && timercmp(&c->rtrx_timeout, &now, <)) {
1392 debug("retransmit()\n");
1396 if(c->poll && buffer_free(&c->sndbuf) && (c->state == ESTABLISHED || c->state == CLOSE_WAIT))
1397 c->poll(c, buffer_free(&c->sndbuf));
1399 if(timerisset(&c->conn_timeout) && timercmp(&c->conn_timeout, &next, <))
1400 next = c->conn_timeout;
1402 if(timerisset(&c->rtrx_timeout) && timercmp(&c->rtrx_timeout, &next, <))
1403 next = c->rtrx_timeout;
1406 struct timeval diff;
1407 timersub(&next, &now, &diff);
1411 bool utcp_is_active(struct utcp *utcp) {
1415 for(int i = 0; i < utcp->nconnections; i++)
1416 if(utcp->connections[i]->state != CLOSED && utcp->connections[i]->state != TIME_WAIT)
1422 struct utcp *utcp_init(utcp_accept_t accept, utcp_pre_accept_t pre_accept, utcp_send_t send, void *priv) {
1428 struct utcp *utcp = calloc(1, sizeof *utcp);
1432 utcp->accept = accept;
1433 utcp->pre_accept = pre_accept;
1436 utcp->mtu = DEFAULT_MTU;
1437 utcp->timeout = DEFAULT_USER_TIMEOUT; // s
1438 utcp->rto = START_RTO; // us
1443 void utcp_exit(struct utcp *utcp) {
1446 for(int i = 0; i < utcp->nconnections; i++) {
1447 if(!utcp->connections[i]->reapable)
1448 debug("Warning, freeing unclosed connection %p\n", utcp->connections[i]);
1449 buffer_exit(&utcp->connections[i]->rcvbuf);
1450 buffer_exit(&utcp->connections[i]->sndbuf);
1451 free(utcp->connections[i]);
1453 free(utcp->connections);
1457 uint16_t utcp_get_mtu(struct utcp *utcp) {
1458 return utcp ? utcp->mtu : 0;
1461 void utcp_set_mtu(struct utcp *utcp, uint16_t mtu) {
1462 // TODO: handle overhead of the header
1467 int utcp_get_user_timeout(struct utcp *u) {
1468 return u ? u->timeout : 0;
1471 void utcp_set_user_timeout(struct utcp *u, int timeout) {
1473 u->timeout = timeout;
1476 size_t utcp_get_sndbuf(struct utcp_connection *c) {
1477 return c ? c->sndbuf.maxsize : 0;
1480 size_t utcp_get_sndbuf_free(struct utcp_connection *c) {
1481 if(c && (c->state == ESTABLISHED || c->state == CLOSE_WAIT))
1482 return buffer_free(&c->sndbuf);
1487 void utcp_set_sndbuf(struct utcp_connection *c, size_t size) {
1490 c->sndbuf.maxsize = size;
1491 if(c->sndbuf.maxsize != size)
1492 c->sndbuf.maxsize = -1;
1495 size_t utcp_get_rcvbuf(struct utcp_connection *c) {
1496 return c ? c->rcvbuf.maxsize : 0;
1499 size_t utcp_get_rcvbuf_free(struct utcp_connection *c) {
1500 if(c && (c->state == ESTABLISHED || c->state == CLOSE_WAIT))
1501 return buffer_free(&c->rcvbuf);
1506 void utcp_set_rcvbuf(struct utcp_connection *c, size_t size) {
1509 c->rcvbuf.maxsize = size;
1510 if(c->rcvbuf.maxsize != size)
1511 c->rcvbuf.maxsize = -1;
1514 bool utcp_get_nodelay(struct utcp_connection *c) {
1515 return c ? c->nodelay : false;
1518 void utcp_set_nodelay(struct utcp_connection *c, bool nodelay) {
1520 c->nodelay = nodelay;
1523 bool utcp_get_keepalive(struct utcp_connection *c) {
1524 return c ? c->keepalive : false;
1527 void utcp_set_keepalive(struct utcp_connection *c, bool keepalive) {
1529 c->keepalive = keepalive;
1532 size_t utcp_get_outq(struct utcp_connection *c) {
1533 return c ? seqdiff(c->snd.nxt, c->snd.una) : 0;
1536 void utcp_set_recv_cb(struct utcp_connection *c, utcp_recv_t recv) {
1541 void utcp_set_poll_cb(struct utcp_connection *c, utcp_poll_t poll) {
1546 void utcp_set_accept_cb(struct utcp *utcp, utcp_accept_t accept, utcp_pre_accept_t pre_accept) {
1548 utcp->accept = accept;
1549 utcp->pre_accept = pre_accept;