2 utcp.c -- Userspace TCP
3 Copyright (C) 2014-2017 Guus Sliepen <guus@tinc-vpn.org>
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
15 You should have received a copy of the GNU General Public License along
16 with this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
31 #include <sys/socket.h>
34 #include "utcp_priv.h"
49 #define timersub(a, b, r)\
51 (r)->tv_sec = (a)->tv_sec - (b)->tv_sec;\
52 (r)->tv_usec = (a)->tv_usec - (b)->tv_usec;\
54 (r)->tv_sec--, (r)->tv_usec += USEC_PER_SEC;\
/* Return the smaller of two sizes. */
static inline size_t min(size_t a, size_t b) {
	return a < b ? a : b;
}
/* Return the larger of two sizes. */
static inline size_t max(size_t a, size_t b) {
	return a > b ? a : b;
}
69 #ifndef UTCP_DEBUG_DATALEN
70 #define UTCP_DEBUG_DATALEN 20
73 static void debug(struct utcp_connection *c, const char *format, ...) {
78 clock_gettime(CLOCK_REALTIME, &tv);
79 len = snprintf(buf, sizeof(buf), "%ld.%06lu %u:%u ", (long)tv.tv_sec, tv.tv_nsec / 1000, c ? c->src : 0, c ? c->dst : 0);
82 len += vsnprintf(buf + len, sizeof(buf) - len, format, ap);
85 if(len > 0 && (size_t)len < sizeof(buf)) {
86 fwrite(buf, len, 1, stderr);
90 static void print_packet(struct utcp_connection *c, const char *dir, const void *pkt, size_t len) {
93 if(len < sizeof(hdr)) {
94 debug(c, "%s: short packet (%lu bytes)\n", dir, (unsigned long)len);
98 memcpy(&hdr, pkt, sizeof(hdr));
102 if(len > sizeof(hdr)) {
103 datalen = min(len - sizeof(hdr), UTCP_DEBUG_DATALEN);
109 const uint8_t *data = (uint8_t *)pkt + sizeof(hdr);
110 char str[datalen * 2 + 1];
113 for(uint32_t i = 0; i < datalen; i++) {
114 *p++ = "0123456789ABCDEF"[data[i] >> 4];
115 *p++ = "0123456789ABCDEF"[data[i] & 15];
120 debug(c, "%s: len %lu src %u dst %u seq %u ack %u wnd %u aux %x ctl %s%s%s%s data %s\n",
121 dir, (unsigned long)len, hdr.src, hdr.dst, hdr.seq, hdr.ack, hdr.wnd, hdr.aux,
122 hdr.ctl & SYN ? "SYN" : "",
123 hdr.ctl & RST ? "RST" : "",
124 hdr.ctl & FIN ? "FIN" : "",
125 hdr.ctl & ACK ? "ACK" : "",
130 static void debug_cwnd(struct utcp_connection *c) {
131 debug(c, "snd.cwnd %u snd.ssthresh %u\n", c->snd.cwnd, c->snd.ssthresh);
134 #define debug(...) do {} while(0)
135 #define print_packet(...) do {} while(0)
136 #define debug_cwnd(...) do {} while(0)
139 static void set_state(struct utcp_connection *c, enum state state) {
142 if(state == ESTABLISHED) {
143 timerclear(&c->conn_timeout);
146 debug(c, "state %s\n", strstate[state]);
149 static bool fin_wanted(struct utcp_connection *c, uint32_t seq) {
150 if(seq != c->snd.last) {
165 static bool is_reliable(struct utcp_connection *c) {
166 return c->flags & UTCP_RELIABLE;
/* Signed difference a - b between two 32-bit sequence numbers.
 * Unsigned subtraction followed by conversion to int32_t gives the
 * correct small signed result across sequence-number wrap-around. */
static int32_t seqdiff(uint32_t a, uint32_t b) {
	return a - b;
}
174 // TODO: convert to ringbuffers to avoid memmove() operations.
176 // Store data into the buffer
177 static ssize_t buffer_put_at(struct buffer *buf, size_t offset, const void *data, size_t len) {
178 debug(NULL, "buffer_put_at %lu %lu %lu\n", (unsigned long)buf->used, (unsigned long)offset, (unsigned long)len);
180 size_t required = offset + len;
182 if(required > buf->maxsize) {
183 if(offset >= buf->maxsize) {
187 len = buf->maxsize - offset;
188 required = buf->maxsize;
191 if(required > buf->size) {
192 size_t newsize = buf->size;
199 } while(newsize < required);
202 if(newsize > buf->maxsize) {
203 newsize = buf->maxsize;
206 char *newdata = realloc(buf->data, newsize);
216 memcpy(buf->data + offset, data, len);
218 if(required > buf->used) {
219 buf->used = required;
225 static ssize_t buffer_put(struct buffer *buf, const void *data, size_t len) {
226 return buffer_put_at(buf, buf->used, data, len);
229 // Get data from the buffer. data can be NULL.
230 static ssize_t buffer_get(struct buffer *buf, void *data, size_t len) {
231 if(len > buf->used) {
236 memcpy(data, buf->data, len);
239 if(len < buf->used) {
240 memmove(buf->data, buf->data + len, buf->used - len);
247 // Copy data from the buffer without removing it.
248 static ssize_t buffer_copy(struct buffer *buf, void *data, size_t offset, size_t len) {
249 if(offset >= buf->used) {
253 if(offset + len > buf->used) {
254 len = buf->used - offset;
257 memcpy(data, buf->data + offset, len);
261 static bool buffer_init(struct buffer *buf, uint32_t len, uint32_t maxlen) {
262 memset(buf, 0, sizeof(*buf));
265 buf->data = malloc(len);
273 buf->maxsize = maxlen;
277 static void buffer_exit(struct buffer *buf) {
279 memset(buf, 0, sizeof(*buf));
282 static uint32_t buffer_free(const struct buffer *buf) {
283 return buf->maxsize - buf->used;
286 // Connections are stored in a sorted list.
287 // This gives O(log(N)) lookup time, O(N log(N)) insertion time and O(N) deletion time.
289 static int compare(const void *va, const void *vb) {
292 const struct utcp_connection *a = *(struct utcp_connection **)va;
293 const struct utcp_connection *b = *(struct utcp_connection **)vb;
296 assert(a->src && b->src);
298 int c = (int)a->src - (int)b->src;
304 c = (int)a->dst - (int)b->dst;
308 static struct utcp_connection *find_connection(const struct utcp *utcp, uint16_t src, uint16_t dst) {
309 if(!utcp->nconnections) {
313 struct utcp_connection key = {
317 struct utcp_connection **match = bsearch(&keyp, utcp->connections, utcp->nconnections, sizeof(*utcp->connections), compare);
318 return match ? *match : NULL;
321 static void free_connection(struct utcp_connection *c) {
322 struct utcp *utcp = c->utcp;
323 struct utcp_connection **cp = bsearch(&c, utcp->connections, utcp->nconnections, sizeof(*utcp->connections), compare);
327 int i = cp - utcp->connections;
328 memmove(cp, cp + 1, (utcp->nconnections - i - 1) * sizeof(*cp));
329 utcp->nconnections--;
331 buffer_exit(&c->rcvbuf);
332 buffer_exit(&c->sndbuf);
336 static struct utcp_connection *allocate_connection(struct utcp *utcp, uint16_t src, uint16_t dst) {
337 // Check whether this combination of src and dst is free
340 if(find_connection(utcp, src, dst)) {
344 } else { // If src == 0, generate a random port number with the high bit set
345 if(utcp->nconnections >= 32767) {
350 src = rand() | 0x8000;
352 while(find_connection(utcp, src, dst)) {
357 // Allocate memory for the new connection
359 if(utcp->nconnections >= utcp->nallocated) {
360 if(!utcp->nallocated) {
361 utcp->nallocated = 4;
363 utcp->nallocated *= 2;
366 struct utcp_connection **new_array = realloc(utcp->connections, utcp->nallocated * sizeof(*utcp->connections));
372 utcp->connections = new_array;
375 struct utcp_connection *c = calloc(1, sizeof(*c));
381 if(!buffer_init(&c->sndbuf, DEFAULT_SNDBUFSIZE, DEFAULT_MAXSNDBUFSIZE)) {
386 if(!buffer_init(&c->rcvbuf, DEFAULT_RCVBUFSIZE, DEFAULT_MAXRCVBUFSIZE)) {
387 buffer_exit(&c->sndbuf);
392 // Fill in the details
401 c->snd.una = c->snd.iss;
402 c->snd.nxt = c->snd.iss + 1;
403 c->snd.last = c->snd.nxt;
404 c->snd.cwnd = (utcp->mtu > 2190 ? 2 : utcp->mtu > 1095 ? 3 : 4) * utcp->mtu;
406 c->snd.ssthresh = c->sndbuf.maxsize;
408 c->snd.ssthresh = ~0;
413 // Add it to the sorted list of connections
415 utcp->connections[utcp->nconnections++] = c;
416 qsort(utcp->connections, utcp->nconnections, sizeof(*utcp->connections), compare);
/* Absolute difference of two unsigned 32-bit values, computed without
 * risking unsigned wrap-around. */
static inline uint32_t absdiff(uint32_t a, uint32_t b) {
	return a > b ? a - b : b - a;
}
429 // Update RTT variables. See RFC 6298.
430 static void update_rtt(struct utcp_connection *c, uint32_t rtt) {
432 debug(c, "invalid rtt\n");
436 struct utcp *utcp = c->utcp;
440 utcp->rttvar = rtt / 2;
442 utcp->rttvar = (utcp->rttvar * 3 + absdiff(utcp->srtt, rtt)) / 4;
443 utcp->srtt = (utcp->srtt * 7 + rtt) / 8;
446 utcp->rto = utcp->srtt + max(4 * utcp->rttvar, CLOCK_GRANULARITY);
448 if(utcp->rto > MAX_RTO) {
452 debug(c, "rtt %u srtt %u rttvar %u rto %u\n", rtt, utcp->srtt, utcp->rttvar, utcp->rto);
455 static void start_retransmit_timer(struct utcp_connection *c) {
456 gettimeofday(&c->rtrx_timeout, NULL);
457 c->rtrx_timeout.tv_usec += c->utcp->rto;
459 while(c->rtrx_timeout.tv_usec >= 1000000) {
460 c->rtrx_timeout.tv_usec -= 1000000;
461 c->rtrx_timeout.tv_sec++;
464 debug(c, "rtrx_timeout %ld.%06lu\n", c->rtrx_timeout.tv_sec, c->rtrx_timeout.tv_usec);
467 static void stop_retransmit_timer(struct utcp_connection *c) {
468 timerclear(&c->rtrx_timeout);
469 debug(c, "rtrx_timeout cleared\n");
472 struct utcp_connection *utcp_connect_ex(struct utcp *utcp, uint16_t dst, utcp_recv_t recv, void *priv, uint32_t flags) {
473 struct utcp_connection *c = allocate_connection(utcp, 0, dst);
479 assert((flags & ~0x1f) == 0);
490 pkt.hdr.src = c->src;
491 pkt.hdr.dst = c->dst;
492 pkt.hdr.seq = c->snd.iss;
494 pkt.hdr.wnd = c->rcvbuf.maxsize;
496 pkt.hdr.aux = 0x0101;
500 pkt.init[3] = flags & 0x7;
502 set_state(c, SYN_SENT);
504 print_packet(c, "send", &pkt, sizeof(pkt));
505 utcp->send(utcp, &pkt, sizeof(pkt));
507 gettimeofday(&c->conn_timeout, NULL);
508 c->conn_timeout.tv_sec += utcp->timeout;
510 start_retransmit_timer(c);
515 struct utcp_connection *utcp_connect(struct utcp *utcp, uint16_t dst, utcp_recv_t recv, void *priv) {
516 return utcp_connect_ex(utcp, dst, recv, priv, UTCP_TCP);
519 void utcp_accept(struct utcp_connection *c, utcp_recv_t recv, void *priv) {
520 if(c->reapable || c->state != SYN_RECEIVED) {
521 debug(c, "accept() called on invalid connection in state %s\n", c, strstate[c->state]);
525 debug(c, "accepted %p %p\n", c, recv, priv);
528 set_state(c, ESTABLISHED);
531 static void ack(struct utcp_connection *c, bool sendatleastone) {
532 int32_t left = seqdiff(c->snd.last, c->snd.nxt);
533 int32_t cwndleft = min(c->snd.cwnd, c->snd.wnd) - seqdiff(c->snd.nxt, c->snd.una);
539 } else if(cwndleft < left) {
542 if(!sendatleastone || cwndleft > c->utcp->mtu) {
543 left -= left % c->utcp->mtu;
547 debug(c, "cwndleft %d left %d\n", cwndleft, left);
549 if(!left && !sendatleastone) {
558 pkt = malloc(sizeof(pkt->hdr) + c->utcp->mtu);
564 pkt->hdr.src = c->src;
565 pkt->hdr.dst = c->dst;
566 pkt->hdr.ack = c->rcv.nxt;
567 pkt->hdr.wnd = c->rcvbuf.maxsize;
572 uint32_t seglen = left > c->utcp->mtu ? c->utcp->mtu : left;
573 pkt->hdr.seq = c->snd.nxt;
575 buffer_copy(&c->sndbuf, pkt->data, seqdiff(c->snd.nxt, c->snd.una), seglen);
577 c->snd.nxt += seglen;
580 if(seglen && fin_wanted(c, c->snd.nxt)) {
585 if(!c->rtt_start.tv_sec) {
586 // Start RTT measurement
587 gettimeofday(&c->rtt_start, NULL);
588 c->rtt_seq = pkt->hdr.seq + seglen;
589 debug(c, "starting RTT measurement, expecting ack %u\n", c->rtt_seq);
592 print_packet(c, "send", pkt, sizeof(pkt->hdr) + seglen);
593 c->utcp->send(c->utcp, pkt, sizeof(pkt->hdr) + seglen);
599 ssize_t utcp_send(struct utcp_connection *c, const void *data, size_t len) {
601 debug(c, "send() called on closed connection\n");
609 debug(c, "send() called on unconnected connection\n");
624 debug(c, "send() called on closed connection\n");
629 // Exit early if we have nothing to send.
640 // Check if we need to be able to buffer all data
642 if(c->flags & UTCP_NO_PARTIAL) {
643 if(len > buffer_free(&c->sndbuf)) {
644 if(len > c->sndbuf.maxsize) {
654 // Add data to send buffer.
656 if(is_reliable(c) || (c->state != SYN_SENT && c->state != SYN_RECEIVED)) {
657 len = buffer_put(&c->sndbuf, data, len);
673 // Don't send anything yet if the connection has not fully established yet
675 if(c->state == SYN_SENT || c->state == SYN_RECEIVED) {
681 if(!is_reliable(c)) {
682 c->snd.una = c->snd.nxt = c->snd.last;
683 buffer_get(&c->sndbuf, NULL, c->sndbuf.used);
686 if(is_reliable(c) && !timerisset(&c->rtrx_timeout)) {
687 start_retransmit_timer(c);
690 if(is_reliable(c) && !timerisset(&c->conn_timeout)) {
691 gettimeofday(&c->conn_timeout, NULL);
692 c->conn_timeout.tv_sec += c->utcp->timeout;
698 static void swap_ports(struct hdr *hdr) {
699 uint16_t tmp = hdr->src;
704 static void fast_retransmit(struct utcp_connection *c) {
705 if(c->state == CLOSED || c->snd.last == c->snd.una) {
706 debug(c, "fast_retransmit() called but nothing to retransmit!\n");
710 struct utcp *utcp = c->utcp;
717 pkt = malloc(sizeof(pkt->hdr) + c->utcp->mtu);
723 pkt->hdr.src = c->src;
724 pkt->hdr.dst = c->dst;
725 pkt->hdr.wnd = c->rcvbuf.maxsize;
734 // Send unacked data again.
735 pkt->hdr.seq = c->snd.una;
736 pkt->hdr.ack = c->rcv.nxt;
738 uint32_t len = min(seqdiff(c->snd.last, c->snd.una), utcp->mtu);
740 if(fin_wanted(c, c->snd.una + len)) {
745 buffer_copy(&c->sndbuf, pkt->data, 0, len);
746 print_packet(c, "rtrx", pkt, sizeof(pkt->hdr) + len);
747 utcp->send(utcp, pkt, sizeof(pkt->hdr) + len);
757 static void retransmit(struct utcp_connection *c) {
758 if(c->state == CLOSED || c->snd.last == c->snd.una) {
759 debug(c, "retransmit() called but nothing to retransmit!\n");
760 stop_retransmit_timer(c);
764 struct utcp *utcp = c->utcp;
771 pkt = malloc(sizeof(pkt->hdr) + c->utcp->mtu);
777 pkt->hdr.src = c->src;
778 pkt->hdr.dst = c->dst;
779 pkt->hdr.wnd = c->rcvbuf.maxsize;
784 // Send our SYN again
785 pkt->hdr.seq = c->snd.iss;
788 pkt->hdr.aux = 0x0101;
792 pkt->data[3] = c->flags & 0x7;
793 print_packet(c, "rtrx", pkt, sizeof(pkt->hdr) + 4);
794 utcp->send(utcp, pkt, sizeof(pkt->hdr) + 4);
799 pkt->hdr.seq = c->snd.nxt;
800 pkt->hdr.ack = c->rcv.nxt;
801 pkt->hdr.ctl = SYN | ACK;
802 print_packet(c, "rtrx", pkt, sizeof(pkt->hdr));
803 utcp->send(utcp, pkt, sizeof(pkt->hdr));
811 // Send unacked data again.
812 pkt->hdr.seq = c->snd.una;
813 pkt->hdr.ack = c->rcv.nxt;
815 uint32_t len = seqdiff(c->snd.last, c->snd.una);
817 if(len > utcp->mtu) {
821 if(fin_wanted(c, c->snd.una + len)) {
826 c->snd.nxt = c->snd.una + len;
828 // RFC 5681 slow start after timeout
829 c->snd.ssthresh = max(c->snd.cwnd / 2, utcp->mtu * 2); // eq. 4
830 c->snd.cwnd = utcp->mtu;
833 buffer_copy(&c->sndbuf, pkt->data, 0, len);
834 print_packet(c, "rtrx", pkt, sizeof(pkt->hdr) + len);
835 utcp->send(utcp, pkt, sizeof(pkt->hdr) + len);
842 // We shouldn't need to retransmit anything in this state.
846 stop_retransmit_timer(c);
850 start_retransmit_timer(c);
853 if(utcp->rto > MAX_RTO) {
857 c->rtt_start.tv_sec = 0; // invalidate RTT timer
863 /* Update receive buffer and SACK entries after consuming data.
867 * |.....0000..1111111111.....22222......3333|
870 * 0..3 represent the SACK entries. The ^ indicates up to which point we want
871 * to remove data from the receive buffer. The idea is to substract "len"
872 * from the offset of all the SACK entries, and then remove/cut down entries
873 * that are shifted to before the start of the receive buffer.
875 * There are three cases:
876 * - the SACK entry is after ^, in that case just change the offset.
877 * - the SACK entry starts before and ends after ^, so we have to
878 * change both its offset and size.
879 * - the SACK entry is completely before ^, in that case delete it.
881 static void sack_consume(struct utcp_connection *c, size_t len) {
882 debug(c, "sack_consume %lu\n", (unsigned long)len);
884 if(len > c->rcvbuf.used) {
885 debug(c, "all SACK entries consumed\n");
890 buffer_get(&c->rcvbuf, NULL, len);
892 for(int i = 0; i < NSACKS && c->sacks[i].len;) {
893 if(len < c->sacks[i].offset) {
894 c->sacks[i].offset -= len;
896 } else if(len < c->sacks[i].offset + c->sacks[i].len) {
897 c->sacks[i].len -= len - c->sacks[i].offset;
898 c->sacks[i].offset = 0;
902 memmove(&c->sacks[i], &c->sacks[i + 1], (NSACKS - 1 - i) * sizeof(c->sacks)[i]);
903 c->sacks[NSACKS - 1].len = 0;
911 for(int i = 0; i < NSACKS && c->sacks[i].len; i++) {
912 debug(c, "SACK[%d] offset %u len %u\n", i, c->sacks[i].offset, c->sacks[i].len);
916 static void handle_out_of_order(struct utcp_connection *c, uint32_t offset, const void *data, size_t len) {
917 debug(c, "out of order packet, offset %u\n", offset);
918 // Packet loss or reordering occured. Store the data in the buffer.
919 ssize_t rxd = buffer_put_at(&c->rcvbuf, offset, data, len);
921 if(rxd < 0 || (size_t)rxd < len) {
925 // Make note of where we put it.
926 for(int i = 0; i < NSACKS; i++) {
927 if(!c->sacks[i].len) { // nothing to merge, add new entry
928 debug(c, "new SACK entry %d\n", i);
929 c->sacks[i].offset = offset;
930 c->sacks[i].len = rxd;
932 } else if(offset < c->sacks[i].offset) {
933 if(offset + rxd < c->sacks[i].offset) { // insert before
934 if(!c->sacks[NSACKS - 1].len) { // only if room left
935 debug(c, "insert SACK entry at %d\n", i);
936 memmove(&c->sacks[i + 1], &c->sacks[i], (NSACKS - i - 1) * sizeof(c->sacks)[i]);
937 c->sacks[i].offset = offset;
938 c->sacks[i].len = rxd;
940 debug(c, "SACK entries full, dropping packet\n");
945 debug(c, "merge with start of SACK entry at %d\n", i);
946 c->sacks[i].offset = offset;
949 } else if(offset <= c->sacks[i].offset + c->sacks[i].len) {
950 if(offset + rxd > c->sacks[i].offset + c->sacks[i].len) { // merge
951 debug(c, "merge with end of SACK entry at %d\n", i);
952 c->sacks[i].len = offset + rxd - c->sacks[i].offset;
953 // TODO: handle potential merge with next entry
960 for(int i = 0; i < NSACKS && c->sacks[i].len; i++) {
961 debug(c, "SACK[%d] offset %u len %u\n", i, c->sacks[i].offset, c->sacks[i].len);
965 static void handle_in_order(struct utcp_connection *c, const void *data, size_t len) {
966 // Check if we can process out-of-order data now.
967 if(c->sacks[0].len && len >= c->sacks[0].offset) { // TODO: handle overlap with second SACK
968 debug(c, "incoming packet len %lu connected with SACK at %u\n", (unsigned long)len, c->sacks[0].offset);
969 buffer_put_at(&c->rcvbuf, 0, data, len); // TODO: handle return value
970 len = max(len, c->sacks[0].offset + c->sacks[0].len);
971 data = c->rcvbuf.data;
975 ssize_t rxd = c->recv(c, data, len);
977 if(rxd < 0 || (size_t)rxd != len) {
978 // TODO: handle the application not accepting all data.
984 sack_consume(c, len);
991 static void handle_incoming_data(struct utcp_connection *c, uint32_t seq, const void *data, size_t len) {
992 if(!is_reliable(c)) {
993 c->recv(c, data, len);
994 c->rcv.nxt = seq + len;
998 uint32_t offset = seqdiff(seq, c->rcv.nxt);
1000 if(offset + len > c->rcvbuf.maxsize) {
1005 handle_out_of_order(c, offset, data, len);
1007 handle_in_order(c, data, len);
1012 ssize_t utcp_recv(struct utcp *utcp, const void *data, size_t len) {
1013 const uint8_t *ptr = data;
1029 // Drop packets smaller than the header
1033 if(len < sizeof(hdr)) {
1034 print_packet(NULL, "recv", data, len);
1039 // Make a copy from the potentially unaligned data to a struct hdr
1041 memcpy(&hdr, ptr, sizeof(hdr));
1043 // Try to match the packet to an existing connection
1045 struct utcp_connection *c = find_connection(utcp, hdr.dst, hdr.src);
1046 print_packet(c, "recv", data, len);
1048 // Process the header
1053 // Drop packets with an unknown CTL flag
1055 if(hdr.ctl & ~(SYN | ACK | RST | FIN)) {
1056 print_packet(NULL, "recv", data, len);
1061 // Check for auxiliary headers
1063 const uint8_t *init = NULL;
1065 uint16_t aux = hdr.aux;
1068 size_t auxlen = 4 * (aux >> 8) & 0xf;
1069 uint8_t auxtype = aux & 0xff;
1078 if(!(hdr.ctl & SYN) || auxlen != 4) {
1094 if(!(aux & 0x800)) {
1103 memcpy(&aux, ptr, 2);
1108 bool has_data = len || (hdr.ctl & (SYN | FIN));
1110 // Is it for a new connection?
1113 // Ignore RST packets
1119 // Is it a SYN packet and are we LISTENing?
1121 if(hdr.ctl & SYN && !(hdr.ctl & ACK) && utcp->accept) {
1122 // If we don't want to accept it, send a RST back
1123 if((utcp->pre_accept && !utcp->pre_accept(utcp, hdr.dst))) {
1128 // Try to allocate memory, otherwise send a RST back
1129 c = allocate_connection(utcp, hdr.dst, hdr.src);
1136 // Parse auxilliary information
1143 c->flags = init[3] & 0x7;
1145 c->flags = UTCP_TCP;
1149 // Return SYN+ACK, go to SYN_RECEIVED state
1150 c->snd.wnd = hdr.wnd;
1151 c->rcv.irs = hdr.seq;
1152 c->rcv.nxt = c->rcv.irs + 1;
1153 set_state(c, SYN_RECEIVED);
1160 pkt.hdr.src = c->src;
1161 pkt.hdr.dst = c->dst;
1162 pkt.hdr.ack = c->rcv.irs + 1;
1163 pkt.hdr.seq = c->snd.iss;
1164 pkt.hdr.wnd = c->rcvbuf.maxsize;
1165 pkt.hdr.ctl = SYN | ACK;
1168 pkt.hdr.aux = 0x0101;
1172 pkt.data[3] = c->flags & 0x7;
1173 print_packet(c, "send", &pkt, sizeof(hdr) + 4);
1174 utcp->send(utcp, &pkt, sizeof(hdr) + 4);
1177 print_packet(c, "send", &pkt, sizeof(hdr));
1178 utcp->send(utcp, &pkt, sizeof(hdr));
1181 // No, we don't want your packets, send a RST back
1189 debug(c, "state %s\n", strstate[c->state]);
1191 // In case this is for a CLOSED connection, ignore the packet.
1192 // TODO: make it so incoming packets can never match a CLOSED connection.
1194 if(c->state == CLOSED) {
1195 debug(c, "got packet for closed connection\n");
1199 // It is for an existing connection.
1201 // 1. Drop invalid packets.
1203 // 1a. Drop packets that should not happen in our current state.
1224 // 1b. Discard data that is not in our receive window.
1226 if(is_reliable(c)) {
1229 if(c->state == SYN_SENT) {
1231 } else if(len == 0) {
1232 acceptable = seqdiff(hdr.seq, c->rcv.nxt) >= 0;
1234 int32_t rcv_offset = seqdiff(hdr.seq, c->rcv.nxt);
1236 // cut already accepted front overlapping
1237 if(rcv_offset < 0) {
1238 acceptable = len > (size_t) - rcv_offset;
1243 hdr.seq -= rcv_offset;
1246 acceptable = seqdiff(hdr.seq, c->rcv.nxt) >= 0 && seqdiff(hdr.seq, c->rcv.nxt) + len <= c->rcvbuf.maxsize;
1251 debug(c, "packet not acceptable, %u <= %u + %lu < %u\n", c->rcv.nxt, hdr.seq, (unsigned long)len, c->rcv.nxt + c->rcvbuf.maxsize);
1253 // Ignore unacceptable RST packets.
1258 // Otherwise, continue processing.
1263 c->snd.wnd = hdr.wnd; // TODO: move below
1265 // 1c. Drop packets with an invalid ACK.
1266 // ackno should not roll back, and it should also not be bigger than what we ever could have sent
1267 // (= snd.una + c->sndbuf.used).
1269 if(!is_reliable(c)) {
1270 if(hdr.ack != c->snd.last && c->state >= ESTABLISHED) {
1271 hdr.ack = c->snd.una;
1275 if(hdr.ctl & ACK && (seqdiff(hdr.ack, c->snd.last) > 0 || seqdiff(hdr.ack, c->snd.una) < 0)) {
1276 debug(c, "packet ack seqno out of range, %u <= %u < %u\n", c->snd.una, hdr.ack, c->snd.una + c->sndbuf.used);
1278 // Ignore unacceptable RST packets.
1286 // 2. Handle RST packets
1291 if(!(hdr.ctl & ACK)) {
1295 // The peer has refused our connection.
1296 set_state(c, CLOSED);
1297 errno = ECONNREFUSED;
1300 c->recv(c, NULL, 0);
1303 if(c->poll && !c->reapable) {
1314 // We haven't told the application about this connection yet. Silently delete.
1326 // The peer has aborted our connection.
1327 set_state(c, CLOSED);
1331 c->recv(c, NULL, 0);
1334 if(c->poll && !c->reapable) {
1347 // As far as the application is concerned, the connection has already been closed.
1348 // If it has called utcp_close() already, we can immediately free this connection.
1354 // Otherwise, immediately move to the CLOSED state.
1355 set_state(c, CLOSED);
1368 if(!(hdr.ctl & ACK)) {
1373 // 3. Advance snd.una
1375 advanced = seqdiff(hdr.ack, c->snd.una);
1379 if(c->rtt_start.tv_sec) {
1380 if(c->rtt_seq == hdr.ack) {
1381 struct timeval now, diff;
1382 gettimeofday(&now, NULL);
1383 timersub(&now, &c->rtt_start, &diff);
1384 update_rtt(c, diff.tv_sec * 1000000 + diff.tv_usec);
1385 c->rtt_start.tv_sec = 0;
1386 } else if(c->rtt_seq < hdr.ack) {
1387 debug(c, "cancelling RTT measurement: %u < %u\n", c->rtt_seq, hdr.ack);
1388 c->rtt_start.tv_sec = 0;
1392 int32_t data_acked = advanced;
1400 // TODO: handle FIN as well.
1405 assert(data_acked >= 0);
1408 int32_t bufused = seqdiff(c->snd.last, c->snd.una);
1409 assert(data_acked <= bufused);
1413 buffer_get(&c->sndbuf, NULL, data_acked);
1416 // Also advance snd.nxt if possible
1417 if(seqdiff(c->snd.nxt, hdr.ack) < 0) {
1418 c->snd.nxt = hdr.ack;
1421 c->snd.una = hdr.ack;
1424 if(c->dupack >= 3) {
1425 debug(c, "fast recovery ended\n");
1426 c->snd.cwnd = c->snd.ssthresh;
1432 // Increase the congestion window according to RFC 5681
1433 if(c->snd.cwnd < c->snd.ssthresh) {
1434 c->snd.cwnd += min(advanced, utcp->mtu); // eq. 2
1436 c->snd.cwnd += max(1, (utcp->mtu * utcp->mtu) / c->snd.cwnd); // eq. 3
1439 if(c->snd.cwnd > c->sndbuf.maxsize) {
1440 c->snd.cwnd = c->sndbuf.maxsize;
1445 // Check if we have sent a FIN that is now ACKed.
1448 if(c->snd.una == c->snd.last) {
1449 set_state(c, FIN_WAIT_2);
1455 if(c->snd.una == c->snd.last) {
1456 gettimeofday(&c->conn_timeout, NULL);
1457 c->conn_timeout.tv_sec += utcp->timeout;
1458 set_state(c, TIME_WAIT);
1467 if(!len && is_reliable(c)) {
1469 debug(c, "duplicate ACK %d\n", c->dupack);
1471 if(c->dupack == 3) {
1472 // RFC 5681 fast recovery
1473 debug(c, "fast recovery started\n", c->dupack);
1474 c->snd.ssthresh = max(c->snd.cwnd / 2, utcp->mtu * 2); // eq. 4
1475 c->snd.cwnd = min(c->snd.ssthresh + 3 * utcp->mtu, c->sndbuf.maxsize);
1477 if(c->snd.cwnd > c->sndbuf.maxsize) {
1478 c->snd.cwnd = c->sndbuf.maxsize;
1484 } else if(c->dupack > 3) {
1485 c->snd.cwnd += utcp->mtu;
1487 if(c->snd.cwnd > c->sndbuf.maxsize) {
1488 c->snd.cwnd = c->sndbuf.maxsize;
1499 if(c->snd.una == c->snd.last) {
1500 stop_retransmit_timer(c);
1501 timerclear(&c->conn_timeout);
1502 } else if(is_reliable(c)) {
1503 start_retransmit_timer(c);
1504 gettimeofday(&c->conn_timeout, NULL);
1505 c->conn_timeout.tv_sec += utcp->timeout;
1510 // 5. Process SYN stuff
1516 // This is a SYNACK. It should always have ACKed the SYN.
1521 c->rcv.irs = hdr.seq;
1522 c->rcv.nxt = hdr.seq;
1526 set_state(c, FIN_WAIT_1);
1528 set_state(c, ESTABLISHED);
1531 // TODO: notify application of this somehow.
1535 // This is a retransmit of a SYN, send back the SYNACK.
1545 // Ehm, no. We should never receive a second SYN.
1555 // SYN counts as one sequence number
1559 // 6. Process new data
1561 if(c->state == SYN_RECEIVED) {
1562 // This is the ACK after the SYNACK. It should always have ACKed the SYNACK.
1567 // Are we still LISTENing?
1569 utcp->accept(c, c->src);
1572 if(c->state != ESTABLISHED) {
1573 set_state(c, CLOSED);
1583 // This should never happen.
1598 // Ehm no, We should never receive more data after a FIN.
1608 handle_incoming_data(c, hdr.seq, ptr, len);
1611 // 7. Process FIN stuff
1613 if((hdr.ctl & FIN) && (!is_reliable(c) || hdr.seq + len == c->rcv.nxt)) {
1617 // This should never happen.
1624 set_state(c, CLOSE_WAIT);
1628 set_state(c, CLOSING);
1632 gettimeofday(&c->conn_timeout, NULL);
1633 c->conn_timeout.tv_sec += utcp->timeout;
1634 set_state(c, TIME_WAIT);
1641 // Ehm, no. We should never receive a second FIN.
1651 // FIN counts as one sequence number
1655 // Inform the application that the peer closed its end of the connection.
1658 c->recv(c, NULL, 0);
1662 // Now we send something back if:
1663 // - we received data, so we have to send back an ACK
1664 // -> sendatleastone = true
1665 // - or we got an ack, so we should maybe send a bit more data
1666 // -> sendatleastone = false
1668 if(is_reliable(c) || hdr.ctl & SYN || hdr.ctl & FIN) {
1683 hdr.ack = hdr.seq + len;
1685 hdr.ctl = RST | ACK;
1688 print_packet(c, "send", &hdr, sizeof(hdr));
1689 utcp->send(utcp, &hdr, sizeof(hdr));
1694 int utcp_shutdown(struct utcp_connection *c, int dir) {
1695 debug(c, "shutdown %d at %u\n", dir, c ? c->snd.last : 0);
1703 debug(c, "shutdown() called on closed connection\n");
1708 if(!(dir == UTCP_SHUT_RD || dir == UTCP_SHUT_WR || dir == UTCP_SHUT_RDWR)) {
1713 // TCP does not have a provision for stopping incoming packets.
1714 // The best we can do is to just ignore them.
1715 if(dir == UTCP_SHUT_RD || dir == UTCP_SHUT_RDWR) {
1719 // The rest of the code deals with shutting down writes.
1720 if(dir == UTCP_SHUT_RD) {
1724 // Only process shutting down writes once.
1742 set_state(c, FIN_WAIT_1);
1750 set_state(c, CLOSING);
1763 if(!timerisset(&c->rtrx_timeout)) {
1764 start_retransmit_timer(c);
1770 static bool reset_connection(struct utcp_connection *c) {
1777 debug(c, "abort() called on closed connection\n");
1794 set_state(c, CLOSED);
1802 set_state(c, CLOSED);
1812 hdr.seq = c->snd.nxt;
1817 print_packet(c, "send", &hdr, sizeof(hdr));
1818 c->utcp->send(c->utcp, &hdr, sizeof(hdr));
1822 // Closes all the opened connections
1823 void utcp_abort_all_connections(struct utcp *utcp) {
1829 for(int i = 0; i < utcp->nconnections; i++) {
1830 struct utcp_connection *c = utcp->connections[i];
1832 if(c->reapable || c->state == CLOSED) {
1836 utcp_recv_t old_recv = c->recv;
1837 utcp_poll_t old_poll = c->poll;
1839 reset_connection(c);
1843 old_recv(c, NULL, 0);
1846 if(old_poll && !c->reapable) {
1855 int utcp_close(struct utcp_connection *c) {
1856 if(utcp_shutdown(c, SHUT_RDWR) && errno != ENOTCONN) {
1866 int utcp_abort(struct utcp_connection *c) {
1867 if(!reset_connection(c)) {
1876 * One call to this function will loop through all connections,
1877 * checking if something needs to be resent or not.
1878 * The return value is the time to the next timeout in milliseconds,
1879 * or maybe a negative value if the timeout is infinite.
1881 struct timeval utcp_timeout(struct utcp *utcp) {
1883 gettimeofday(&now, NULL);
1884 struct timeval next = {now.tv_sec + 3600, now.tv_usec};
1886 for(int i = 0; i < utcp->nconnections; i++) {
1887 struct utcp_connection *c = utcp->connections[i];
1893 // delete connections that have been utcp_close()d.
1894 if(c->state == CLOSED) {
1896 debug(c, "reaping\n");
1904 if(timerisset(&c->conn_timeout) && timercmp(&c->conn_timeout, &now, <)) {
1909 c->recv(c, NULL, 0);
1912 if(c->poll && !c->reapable) {
1919 if(timerisset(&c->rtrx_timeout) && timercmp(&c->rtrx_timeout, &now, <)) {
1920 debug(c, "retransmitting after timeout\n");
1925 if((c->state == ESTABLISHED || c->state == CLOSE_WAIT)) {
1926 uint32_t len = buffer_free(&c->sndbuf);
1931 } else if(c->state == CLOSED) {
1936 if(timerisset(&c->conn_timeout) && timercmp(&c->conn_timeout, &next, <)) {
1937 next = c->conn_timeout;
1940 if(timerisset(&c->rtrx_timeout) && timercmp(&c->rtrx_timeout, &next, <)) {
1941 next = c->rtrx_timeout;
1945 struct timeval diff;
1947 timersub(&next, &now, &diff);
1952 bool utcp_is_active(struct utcp *utcp) {
1957 for(int i = 0; i < utcp->nconnections; i++)
1958 if(utcp->connections[i]->state != CLOSED && utcp->connections[i]->state != TIME_WAIT) {
1965 struct utcp *utcp_init(utcp_accept_t accept, utcp_pre_accept_t pre_accept, utcp_send_t send, void *priv) {
1971 struct utcp *utcp = calloc(1, sizeof(*utcp));
1977 utcp->accept = accept;
1978 utcp->pre_accept = pre_accept;
1981 utcp->mtu = DEFAULT_MTU;
1982 utcp->timeout = DEFAULT_USER_TIMEOUT; // sec
1983 utcp->rto = START_RTO; // usec
1988 void utcp_exit(struct utcp *utcp) {
1993 for(int i = 0; i < utcp->nconnections; i++) {
1994 struct utcp_connection *c = utcp->connections[i];
1998 c->recv(c, NULL, 0);
2001 if(c->poll && !c->reapable) {
2006 buffer_exit(&c->rcvbuf);
2007 buffer_exit(&c->sndbuf);
2011 free(utcp->connections);
2015 uint16_t utcp_get_mtu(struct utcp *utcp) {
2016 return utcp ? utcp->mtu : 0;
2019 void utcp_set_mtu(struct utcp *utcp, uint16_t mtu) {
2020 // TODO: handle overhead of the header
2026 void utcp_reset_timers(struct utcp *utcp) {
2031 struct timeval now, then;
2033 gettimeofday(&now, NULL);
2037 then.tv_sec += utcp->timeout;
2039 for(int i = 0; i < utcp->nconnections; i++) {
2040 struct utcp_connection *c = utcp->connections[i];
2046 if(timerisset(&c->rtrx_timeout)) {
2047 c->rtrx_timeout = now;
2050 if(timerisset(&c->conn_timeout)) {
2051 c->conn_timeout = then;
2054 c->rtt_start.tv_sec = 0;
2057 if(utcp->rto > START_RTO) {
2058 utcp->rto = START_RTO;
2062 int utcp_get_user_timeout(struct utcp *u) {
2063 return u ? u->timeout : 0;
2066 void utcp_set_user_timeout(struct utcp *u, int timeout) {
2068 u->timeout = timeout;
2072 size_t utcp_get_sndbuf(struct utcp_connection *c) {
2073 return c ? c->sndbuf.maxsize : 0;
2076 size_t utcp_get_sndbuf_free(struct utcp_connection *c) {
2086 return buffer_free(&c->sndbuf);
2093 void utcp_set_sndbuf(struct utcp_connection *c, size_t size) {
2098 c->sndbuf.maxsize = size;
2100 if(c->sndbuf.maxsize != size) {
2101 c->sndbuf.maxsize = -1;
2105 size_t utcp_get_rcvbuf(struct utcp_connection *c) {
2106 return c ? c->rcvbuf.maxsize : 0;
2109 size_t utcp_get_rcvbuf_free(struct utcp_connection *c) {
2110 if(c && (c->state == ESTABLISHED || c->state == CLOSE_WAIT)) {
2111 return buffer_free(&c->rcvbuf);
2117 void utcp_set_rcvbuf(struct utcp_connection *c, size_t size) {
2122 c->rcvbuf.maxsize = size;
2124 if(c->rcvbuf.maxsize != size) {
2125 c->rcvbuf.maxsize = -1;
2129 size_t utcp_get_sendq(struct utcp_connection *c) {
2130 return c->sndbuf.used;
2133 size_t utcp_get_recvq(struct utcp_connection *c) {
2134 return c->rcvbuf.used;
2137 bool utcp_get_nodelay(struct utcp_connection *c) {
2138 return c ? c->nodelay : false;
2141 void utcp_set_nodelay(struct utcp_connection *c, bool nodelay) {
2143 c->nodelay = nodelay;
2147 bool utcp_get_keepalive(struct utcp_connection *c) {
2148 return c ? c->keepalive : false;
2151 void utcp_set_keepalive(struct utcp_connection *c, bool keepalive) {
2153 c->keepalive = keepalive;
2157 size_t utcp_get_outq(struct utcp_connection *c) {
2158 return c ? seqdiff(c->snd.nxt, c->snd.una) : 0;
2161 void utcp_set_recv_cb(struct utcp_connection *c, utcp_recv_t recv) {
2167 void utcp_set_poll_cb(struct utcp_connection *c, utcp_poll_t poll) {
2173 void utcp_set_accept_cb(struct utcp *utcp, utcp_accept_t accept, utcp_pre_accept_t pre_accept) {
2175 utcp->accept = accept;
2176 utcp->pre_accept = pre_accept;
2180 void utcp_expect_data(struct utcp_connection *c, bool expect) {
2181 if(!c || c->reapable) {
2185 if(!(c->state == ESTABLISHED || c->state == FIN_WAIT_1 || c->state == FIN_WAIT_2)) {
2190 // If we expect data, start the connection timer.
2191 if(!timerisset(&c->conn_timeout)) {
2192 gettimeofday(&c->conn_timeout, NULL);
2193 c->conn_timeout.tv_sec += c->utcp->timeout;
2196 // If we want to cancel expecting data, only clear the timer when there is no unACKed data.
2197 if(c->snd.una == c->snd.last) {
2198 timerclear(&c->conn_timeout);
2203 void utcp_offline(struct utcp *utcp, bool offline) {
2205 gettimeofday(&now, NULL);
2207 for(int i = 0; i < utcp->nconnections; i++) {
2208 struct utcp_connection *c = utcp->connections[i];
2214 utcp_expect_data(c, offline);
2217 if(timerisset(&c->rtrx_timeout)) {
2218 c->rtrx_timeout = now;
2221 utcp->connections[i]->rtt_start.tv_sec = 0;
2225 if(!offline && utcp->rto > START_RTO) {
2226 utcp->rto = START_RTO;