2 utcp.c -- Userspace TCP
3 Copyright (C) 2014-2017 Guus Sliepen <guus@tinc-vpn.org>
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
15 You should have received a copy of the GNU General Public License along
16 with this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
31 #include <sys/socket.h>
33 #include "utcp_priv.h"
#define timersub(a, b, r)\
	do {\
		(r)->tv_sec = (a)->tv_sec - (b)->tv_sec;\
		(r)->tv_usec = (a)->tv_usec - (b)->tv_usec;\
		if((r)->tv_usec < 0)\
			(r)->tv_sec--, (r)->tv_usec += USEC_PER_SEC;\
	} while(0)
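/* Worked example: subtracting {1, 700000} from {2, 300000} first yields
 * {1, -400000}; the underflow branch then borrows a second, giving {0, 600000}. */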
57 static inline size_t max(size_t a, size_t b) {
64 static void debug(const char *format, ...) {
67 vfprintf(stderr, format, ap);
71 static void print_packet(struct utcp *utcp, const char *dir, const void *pkt, size_t len) {
74 if(len < sizeof(hdr)) {
75 debug("%p %s: short packet (%lu bytes)\n", utcp, dir, (unsigned long)len);
79 memcpy(&hdr, pkt, sizeof(hdr));
80 debug("%p %s: len=%lu, src=%u dst=%u seq=%u ack=%u wnd=%u aux=%x ctl=", utcp, dir, (unsigned long)len, hdr.src, hdr.dst, hdr.seq, hdr.ack, hdr.wnd, hdr.aux);
98 if(len > sizeof(hdr)) {
99 uint32_t datalen = len - sizeof(hdr);
100 const uint8_t *data = (uint8_t *)pkt + sizeof(hdr);
101 char str[datalen * 2 + 1];
104 for(uint32_t i = 0; i < datalen; i++) {
105 *p++ = "0123456789ABCDEF"[data[i] >> 4];
106 *p++ = "0123456789ABCDEF"[data[i] & 15];
111 debug(" data=%s", str);
117 #define debug(...) do {} while(0)
118 #define print_packet(...) do {} while(0)
121 static void set_state(struct utcp_connection *c, enum state state) {
124 if(state == ESTABLISHED) {
125 timerclear(&c->conn_timeout);
128 debug("%p new state: %s\n", c->utcp, strstate[state]);
131 static bool fin_wanted(struct utcp_connection *c, uint32_t seq) {
132 if(seq != c->snd.last) {
147 static bool is_reliable(struct utcp_connection *c) {
148 return c->flags & UTCP_RELIABLE;
151 static int32_t seqdiff(uint32_t a, uint32_t b) {
156 // TODO: convert to ringbuffers to avoid memmove() operations.
158 // Store data into the buffer
159 static ssize_t buffer_put_at(struct buffer *buf, size_t offset, const void *data, size_t len) {
160 debug("buffer_put_at %lu %lu %lu\n", (unsigned long)buf->used, (unsigned long)offset, (unsigned long)len);
162 size_t required = offset + len;
164 if(required > buf->maxsize) {
165 if(offset >= buf->maxsize) {
169 len = buf->maxsize - offset;
170 required = buf->maxsize;
173 if(required > buf->size) {
174 size_t newsize = buf->size;
181 } while(newsize < required);
184 if(newsize > buf->maxsize) {
185 newsize = buf->maxsize;
188 char *newdata = realloc(buf->data, newsize);
198 memcpy(buf->data + offset, data, len);
200 if(required > buf->used) {
201 buf->used = required;
207 static ssize_t buffer_put(struct buffer *buf, const void *data, size_t len) {
208 return buffer_put_at(buf, buf->used, data, len);
211 // Get data from the buffer. data can be NULL.
212 static ssize_t buffer_get(struct buffer *buf, void *data, size_t len) {
213 if(len > buf->used) {
218 memcpy(data, buf->data, len);
221 if(len < buf->used) {
222 memmove(buf->data, buf->data + len, buf->used - len);
229 // Copy data from the buffer without removing it.
230 static ssize_t buffer_copy(struct buffer *buf, void *data, size_t offset, size_t len) {
231 if(offset >= buf->used) {
235 if(offset + len > buf->used) {
236 len = buf->used - offset;
239 memcpy(data, buf->data + offset, len);
243 static bool buffer_init(struct buffer *buf, uint32_t len, uint32_t maxlen) {
244 memset(buf, 0, sizeof(*buf));
247 buf->data = malloc(len);
255 buf->maxsize = maxlen;
259 static void buffer_exit(struct buffer *buf) {
261 memset(buf, 0, sizeof(*buf));
264 static uint32_t buffer_free(const struct buffer *buf) {
265 return buf->maxsize - buf->used;
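/* Sketch of how these buffer helpers fit together (illustrative only, not
 * compiled as part of this file):
 *
 *   struct buffer b;
 *   if(buffer_init(&b, 16, 64)) {     // start with 16 bytes, allow growth to 64
 *       buffer_put(&b, "hello", 5);   // append at offset b.used
 *       char out[5];
 *       buffer_get(&b, out, 5);       // copy out the front, shift the rest down
 *       buffer_exit(&b);              // release the backing storage
 *   }
 */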
268 // Connections are stored in a sorted list.
269 // This gives O(log(N)) lookup time, O(N log(N)) insertion time and O(N) deletion time.
271 static int compare(const void *va, const void *vb) {
274 const struct utcp_connection *a = *(struct utcp_connection **)va;
275 const struct utcp_connection *b = *(struct utcp_connection **)vb;
278 assert(a->src && b->src);
280 int c = (int)a->src - (int)b->src;
286 c = (int)a->dst - (int)b->dst;
290 static struct utcp_connection *find_connection(const struct utcp *utcp, uint16_t src, uint16_t dst) {
291 if(!utcp->nconnections) {
295 struct utcp_connection key = {
299 struct utcp_connection **match = bsearch(&keyp, utcp->connections, utcp->nconnections, sizeof(*utcp->connections), compare);
300 return match ? *match : NULL;
303 static void free_connection(struct utcp_connection *c) {
304 struct utcp *utcp = c->utcp;
305 struct utcp_connection **cp = bsearch(&c, utcp->connections, utcp->nconnections, sizeof(*utcp->connections), compare);
309 int i = cp - utcp->connections;
310 memmove(cp, cp + 1, (utcp->nconnections - i - 1) * sizeof(*cp));
311 utcp->nconnections--;
313 buffer_exit(&c->rcvbuf);
314 buffer_exit(&c->sndbuf);
318 static struct utcp_connection *allocate_connection(struct utcp *utcp, uint16_t src, uint16_t dst) {
319 // Check whether this combination of src and dst is free
322 if(find_connection(utcp, src, dst)) {
326 } else { // If src == 0, generate a random port number with the high bit set
327 if(utcp->nconnections >= 32767) {
332 src = rand() | 0x8000;
334 while(find_connection(utcp, src, dst)) {
339 // Allocate memory for the new connection
341 if(utcp->nconnections >= utcp->nallocated) {
342 if(!utcp->nallocated) {
343 utcp->nallocated = 4;
345 utcp->nallocated *= 2;
348 struct utcp_connection **new_array = realloc(utcp->connections, utcp->nallocated * sizeof(*utcp->connections));
354 utcp->connections = new_array;
357 struct utcp_connection *c = calloc(1, sizeof(*c));
363 if(!buffer_init(&c->sndbuf, DEFAULT_SNDBUFSIZE, DEFAULT_MAXSNDBUFSIZE)) {
368 if(!buffer_init(&c->rcvbuf, DEFAULT_RCVBUFSIZE, DEFAULT_MAXRCVBUFSIZE)) {
369 buffer_exit(&c->sndbuf);
374 // Fill in the details
383 c->snd.una = c->snd.iss;
384 c->snd.nxt = c->snd.iss + 1;
385 c->rcv.wnd = utcp->mtu;
386 c->snd.last = c->snd.nxt;
387 c->snd.cwnd = utcp->mtu;
390 // Add it to the sorted list of connections
392 utcp->connections[utcp->nconnections++] = c;
393 qsort(utcp->connections, utcp->nconnections, sizeof(*utcp->connections), compare);
398 static inline uint32_t absdiff(uint32_t a, uint32_t b) {
406 // Update RTT variables. See RFC 6298.
407 static void update_rtt(struct utcp_connection *c, uint32_t rtt) {
409 debug("invalid rtt\n");
413 struct utcp *utcp = c->utcp;
417 utcp->rttvar = rtt / 2;
418 utcp->rto = rtt + max(2 * rtt, CLOCK_GRANULARITY);
420 utcp->rttvar = (utcp->rttvar * 3 + absdiff(utcp->srtt, rtt)) / 4;
421 utcp->srtt = (utcp->srtt * 7 + rtt) / 8;
utcp->rto = utcp->srtt + max(4 * utcp->rttvar, CLOCK_GRANULARITY);
425 if(utcp->rto > MAX_RTO) {
429 debug("rtt %u srtt %u rttvar %u rto %u\n", rtt, utcp->srtt, utcp->rttvar, utcp->rto);
432 static void start_retransmit_timer(struct utcp_connection *c) {
433 gettimeofday(&c->rtrx_timeout, NULL);
434 c->rtrx_timeout.tv_usec += c->utcp->rto;
while(c->rtrx_timeout.tv_usec >= USEC_PER_SEC) {
	c->rtrx_timeout.tv_usec -= USEC_PER_SEC;
438 c->rtrx_timeout.tv_sec++;
441 debug("timeout set to %lu.%06lu (%u)\n", c->rtrx_timeout.tv_sec, c->rtrx_timeout.tv_usec, c->utcp->rto);
444 static void stop_retransmit_timer(struct utcp_connection *c) {
445 timerclear(&c->rtrx_timeout);
446 debug("timeout cleared\n");
449 struct utcp_connection *utcp_connect_ex(struct utcp *utcp, uint16_t dst, utcp_recv_t recv, void *priv, uint32_t flags) {
450 struct utcp_connection *c = allocate_connection(utcp, 0, dst);
456 assert((flags & ~0xf) == 0);
467 pkt.hdr.src = c->src;
468 pkt.hdr.dst = c->dst;
469 pkt.hdr.seq = c->snd.iss;
471 pkt.hdr.wnd = c->rcv.wnd;
473 pkt.hdr.aux = 0x0101;
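// aux 0x0101 announces a single 4-byte auxiliary block of type 1 ("init");
// its last byte carries the connection flags (see the aux parsing in utcp_recv()).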
477 pkt.init[3] = flags & 0x7;
479 set_state(c, SYN_SENT);
481 print_packet(utcp, "send", &pkt, sizeof(pkt));
482 utcp->send(utcp, &pkt, sizeof(pkt));
484 gettimeofday(&c->conn_timeout, NULL);
485 c->conn_timeout.tv_sec += utcp->timeout;
487 start_retransmit_timer(c);
492 struct utcp_connection *utcp_connect(struct utcp *utcp, uint16_t dst, utcp_recv_t recv, void *priv) {
493 return utcp_connect_ex(utcp, dst, recv, priv, UTCP_TCP);
496 void utcp_accept(struct utcp_connection *c, utcp_recv_t recv, void *priv) {
497 if(c->reapable || c->state != SYN_RECEIVED) {
498 debug("Error: accept() called on invalid connection %p in state %s\n", c, strstate[c->state]);
502 debug("%p accepted, %p %p\n", c, recv, priv);
505 set_state(c, ESTABLISHED);
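// ack() is the single transmit path: it acknowledges everything received so far
// and, as long as the congestion window allows, pushes queued data from the send
// buffer in MTU-sized segments, setting FIN on the last segment when required.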
508 static void ack(struct utcp_connection *c, bool sendatleastone) {
509 int32_t left = seqdiff(c->snd.last, c->snd.nxt);
510 int32_t cwndleft = c->snd.cwnd - seqdiff(c->snd.nxt, c->snd.una);
511 debug("cwndleft = %d\n", cwndleft);
519 if(cwndleft < left) {
523 if(!left && !sendatleastone) {
532 pkt = malloc(sizeof(pkt->hdr) + c->utcp->mtu);
538 pkt->hdr.src = c->src;
539 pkt->hdr.dst = c->dst;
540 pkt->hdr.ack = c->rcv.nxt;
541 pkt->hdr.wnd = c->snd.wnd;
546 uint32_t seglen = left > c->utcp->mtu ? c->utcp->mtu : left;
547 pkt->hdr.seq = c->snd.nxt;
549 buffer_copy(&c->sndbuf, pkt->data, seqdiff(c->snd.nxt, c->snd.una), seglen);
551 c->snd.nxt += seglen;
554 if(seglen && fin_wanted(c, c->snd.nxt)) {
559 if(!c->rtt_start.tv_sec) {
560 // Start RTT measurement
561 gettimeofday(&c->rtt_start, NULL);
562 c->rtt_seq = pkt->hdr.seq + seglen;
563 debug("Starting RTT measurement, expecting ack %u\n", c->rtt_seq);
566 print_packet(c->utcp, "send", pkt, sizeof(pkt->hdr) + seglen);
567 c->utcp->send(c->utcp, pkt, sizeof(pkt->hdr) + seglen);
573 ssize_t utcp_send(struct utcp_connection *c, const void *data, size_t len) {
575 debug("Error: send() called on closed connection %p\n", c);
583 debug("Error: send() called on unconnected connection %p\n", c);
598 debug("Error: send() called on closing connection %p\n", c);
603 // Exit early if we have nothing to send.
614 // Add data to send buffer.
616 len = buffer_put(&c->sndbuf, data, len);
// Don't send anything if the connection has not been fully established yet.
627 if(c->state == SYN_SENT || c->state == SYN_RECEIVED) {
633 if(!is_reliable(c)) {
634 c->snd.una = c->snd.nxt = c->snd.last;
635 buffer_get(&c->sndbuf, NULL, c->sndbuf.used);
638 if(is_reliable(c) && !timerisset(&c->rtrx_timeout)) {
639 start_retransmit_timer(c);
642 if(is_reliable(c) && !timerisset(&c->conn_timeout)) {
643 gettimeofday(&c->conn_timeout, NULL);
644 c->conn_timeout.tv_sec += c->utcp->timeout;
650 static void swap_ports(struct hdr *hdr) {
651 uint16_t tmp = hdr->src;
656 static void retransmit(struct utcp_connection *c) {
657 if(c->state == CLOSED || c->snd.last == c->snd.una) {
658 debug("Retransmit() called but nothing to retransmit!\n");
659 stop_retransmit_timer(c);
663 struct utcp *utcp = c->utcp;
670 pkt = malloc(sizeof(pkt->hdr) + c->utcp->mtu);
676 pkt->hdr.src = c->src;
677 pkt->hdr.dst = c->dst;
678 pkt->hdr.wnd = c->rcv.wnd;
683 // Send our SYN again
684 pkt->hdr.seq = c->snd.iss;
687 pkt->hdr.aux = 0x0101;
691 pkt->data[3] = c->flags & 0x7;
692 print_packet(c->utcp, "rtrx", pkt, sizeof(pkt->hdr) + 4);
693 utcp->send(utcp, pkt, sizeof(pkt->hdr) + 4);
698 pkt->hdr.seq = c->snd.nxt;
699 pkt->hdr.ack = c->rcv.nxt;
700 pkt->hdr.ctl = SYN | ACK;
701 print_packet(c->utcp, "rtrx", pkt, sizeof(pkt->hdr));
702 utcp->send(utcp, pkt, sizeof(pkt->hdr));
710 // Send unacked data again.
711 pkt->hdr.seq = c->snd.una;
712 pkt->hdr.ack = c->rcv.nxt;
714 uint32_t len = seqdiff(c->snd.last, c->snd.una);
716 if(len > utcp->mtu) {
720 if(fin_wanted(c, c->snd.una + len)) {
725 c->snd.nxt = c->snd.una + len;
726 c->snd.cwnd = utcp->mtu; // reduce cwnd on retransmit
727 buffer_copy(&c->sndbuf, pkt->data, 0, len);
728 print_packet(c->utcp, "rtrx", pkt, sizeof(pkt->hdr) + len);
729 utcp->send(utcp, pkt, sizeof(pkt->hdr) + len);
736 // We shouldn't need to retransmit anything in this state.
740 stop_retransmit_timer(c);
744 start_retransmit_timer(c);
747 if(utcp->rto > MAX_RTO) {
751 c->rtt_start.tv_sec = 0; // invalidate RTT timer
757 /* Update receive buffer and SACK entries after consuming data.
761 * |.....0000..1111111111.....22222......3333|
 * 0..3 represent the SACK entries. "len" is the number of bytes consumed from
 * the front of the receive buffer. The idea is to subtract "len" from the
 * offset of all the SACK entries, and then remove or cut down entries that
 * are shifted to before the start of the receive buffer.
 *
 * There are three cases:
 * - the SACK entry lies entirely beyond the consumed region: just lower its offset.
 * - the SACK entry starts inside but ends beyond the consumed region: adjust
 *   both its offset and its length.
 * - the SACK entry lies entirely inside the consumed region: delete it.
 */
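/* Worked example (illustrative): with one entry {offset 4, len 6} and len = 7,
 * the first 7 bytes leave the receive buffer; the entry starts inside the
 * consumed region, so it becomes {offset 0, len 3}: the three SACKed bytes
 * that are now waiting at the front of the buffer. */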
775 static void sack_consume(struct utcp_connection *c, size_t len) {
776 debug("sack_consume %lu\n", (unsigned long)len);
778 if(len > c->rcvbuf.used) {
779 debug("All SACK entries consumed");
784 buffer_get(&c->rcvbuf, NULL, len);
786 for(int i = 0; i < NSACKS && c->sacks[i].len;) {
787 if(len < c->sacks[i].offset) {
788 c->sacks[i].offset -= len;
790 } else if(len < c->sacks[i].offset + c->sacks[i].len) {
791 c->sacks[i].len -= len - c->sacks[i].offset;
792 c->sacks[i].offset = 0;
memmove(&c->sacks[i], &c->sacks[i + 1], (NSACKS - 1 - i) * sizeof(c->sacks[i]));
797 c->sacks[NSACKS - 1].len = 0;
805 for(int i = 0; i < NSACKS && c->sacks[i].len; i++) {
806 debug("SACK[%d] offset %u len %u\n", i, c->sacks[i].offset, c->sacks[i].len);
810 static void handle_out_of_order(struct utcp_connection *c, uint32_t offset, const void *data, size_t len) {
811 debug("out of order packet, offset %u\n", offset);
// Packet loss or reordering occurred. Store the data in the buffer.
813 ssize_t rxd = buffer_put_at(&c->rcvbuf, offset, data, len);
815 if(rxd < 0 || (size_t)rxd < len) {
819 // Make note of where we put it.
820 for(int i = 0; i < NSACKS; i++) {
821 if(!c->sacks[i].len) { // nothing to merge, add new entry
822 debug("New SACK entry %d\n", i);
823 c->sacks[i].offset = offset;
824 c->sacks[i].len = rxd;
826 } else if(offset < c->sacks[i].offset) {
827 if(offset + rxd < c->sacks[i].offset) { // insert before
828 if(!c->sacks[NSACKS - 1].len) { // only if room left
829 debug("Insert SACK entry at %d\n", i);
memmove(&c->sacks[i + 1], &c->sacks[i], (NSACKS - i - 1) * sizeof(c->sacks[i]));
831 c->sacks[i].offset = offset;
832 c->sacks[i].len = rxd;
834 debug("SACK entries full, dropping packet\n");
839 debug("Merge with start of SACK entry at %d\n", i);
840 c->sacks[i].offset = offset;
843 } else if(offset <= c->sacks[i].offset + c->sacks[i].len) {
844 if(offset + rxd > c->sacks[i].offset + c->sacks[i].len) { // merge
845 debug("Merge with end of SACK entry at %d\n", i);
846 c->sacks[i].len = offset + rxd - c->sacks[i].offset;
847 // TODO: handle potential merge with next entry
854 for(int i = 0; i < NSACKS && c->sacks[i].len; i++) {
855 debug("SACK[%d] offset %u len %u\n", i, c->sacks[i].offset, c->sacks[i].len);
859 static void handle_in_order(struct utcp_connection *c, const void *data, size_t len) {
860 // Check if we can process out-of-order data now.
861 if(c->sacks[0].len && len >= c->sacks[0].offset) { // TODO: handle overlap with second SACK
862 debug("incoming packet len %lu connected with SACK at %u\n", (unsigned long)len, c->sacks[0].offset);
863 buffer_put_at(&c->rcvbuf, 0, data, len); // TODO: handle return value
864 len = max(len, c->sacks[0].offset + c->sacks[0].len);
865 data = c->rcvbuf.data;
869 ssize_t rxd = c->recv(c, data, len);
871 if(rxd < 0 || (size_t)rxd != len) {
872 // TODO: handle the application not accepting all data.
878 sack_consume(c, len);
885 static void handle_incoming_data(struct utcp_connection *c, uint32_t seq, const void *data, size_t len) {
886 if(!is_reliable(c)) {
887 c->recv(c, data, len);
888 c->rcv.nxt = seq + len;
892 uint32_t offset = seqdiff(seq, c->rcv.nxt);
894 if(offset + len > c->rcvbuf.maxsize) {
899 handle_out_of_order(c, offset, data, len);
901 handle_in_order(c, data, len);
906 ssize_t utcp_recv(struct utcp *utcp, const void *data, size_t len) {
907 const uint8_t *ptr = data;
923 print_packet(utcp, "recv", data, len);
925 // Drop packets smaller than the header
929 if(len < sizeof(hdr)) {
934 // Make a copy from the potentially unaligned data to a struct hdr
936 memcpy(&hdr, ptr, sizeof(hdr));
940 // Drop packets with an unknown CTL flag
942 if(hdr.ctl & ~(SYN | ACK | RST | FIN)) {
947 // Check for auxiliary headers
949 const uint8_t *init = NULL;
951 uint16_t aux = hdr.aux;
size_t auxlen = 4 * ((aux >> 8) & 0xf); // aux length is encoded in units of 32-bit words
955 uint8_t auxtype = aux & 0xff;
964 if(!(hdr.ctl & SYN) || auxlen != 4) {
989 memcpy(&aux, ptr, 2);
994 // Try to match the packet to an existing connection
996 struct utcp_connection *c = find_connection(utcp, hdr.dst, hdr.src);
998 // Is it for a new connection?
1001 // Ignore RST packets
1007 // Is it a SYN packet and are we LISTENing?
1009 if(hdr.ctl & SYN && !(hdr.ctl & ACK) && utcp->accept) {
1010 // If we don't want to accept it, send a RST back
1011 if((utcp->pre_accept && !utcp->pre_accept(utcp, hdr.dst))) {
1016 // Try to allocate memory, otherwise send a RST back
1017 c = allocate_connection(utcp, hdr.dst, hdr.src);
// Parse auxiliary information
1031 c->flags = init[3] & 0x7;
1033 c->flags = UTCP_TCP;
1036 // Return SYN+ACK, go to SYN_RECEIVED state
1037 c->snd.wnd = hdr.wnd;
1038 c->rcv.irs = hdr.seq;
1039 c->rcv.nxt = c->rcv.irs + 1;
1040 set_state(c, SYN_RECEIVED);
1047 pkt.hdr.src = c->src;
1048 pkt.hdr.dst = c->dst;
1049 pkt.hdr.ack = c->rcv.irs + 1;
1050 pkt.hdr.seq = c->snd.iss;
1051 pkt.hdr.wnd = c->rcv.wnd;
1052 pkt.hdr.ctl = SYN | ACK;
1055 pkt.hdr.aux = 0x0101;
1059 pkt.data[3] = c->flags & 0x7;
1060 print_packet(c->utcp, "send", &pkt, sizeof(hdr) + 4);
1061 utcp->send(utcp, &pkt, sizeof(hdr) + 4);
1064 print_packet(c->utcp, "send", &pkt, sizeof(hdr));
1065 utcp->send(utcp, &pkt, sizeof(hdr));
1068 // No, we don't want your packets, send a RST back
1076 debug("%p state %s\n", c->utcp, strstate[c->state]);
1078 // In case this is for a CLOSED connection, ignore the packet.
1079 // TODO: make it so incoming packets can never match a CLOSED connection.
1081 if(c->state == CLOSED) {
1082 debug("Got packet for closed connection\n");
1086 // It is for an existing connection.
1088 uint32_t prevrcvnxt = c->rcv.nxt;
1090 // 1. Drop invalid packets.
1092 // 1a. Drop packets that should not happen in our current state.
1113 // 1b. Drop packets with a sequence number not in our receive window.
1117 if(c->state == SYN_SENT) {
1119 } else if(len == 0) {
1120 acceptable = seqdiff(hdr.seq, c->rcv.nxt) >= 0;
1122 int32_t rcv_offset = seqdiff(hdr.seq, c->rcv.nxt);
// Cut off the part at the front of the segment that we have already accepted.
1125 if(rcv_offset < 0) {
acceptable = len > (size_t)-rcv_offset; // the segment must extend past rcv.nxt
1131 hdr.seq -= rcv_offset;
1134 acceptable = seqdiff(hdr.seq, c->rcv.nxt) >= 0 && seqdiff(hdr.seq, c->rcv.nxt) + len <= c->rcvbuf.maxsize;
1139 debug("Packet not acceptable, %u <= %u + %lu < %u\n", c->rcv.nxt, hdr.seq, (unsigned long)len, c->rcv.nxt + c->rcvbuf.maxsize);
1141 // Ignore unacceptable RST packets.
1146 // Otherwise, continue processing.
1150 c->snd.wnd = hdr.wnd; // TODO: move below
1152 // 1c. Drop packets with an invalid ACK.
// ackno should not roll back, and it should not be beyond anything we could
// ever have sent (i.e. not past snd.last).
1156 if(hdr.ctl & ACK && (seqdiff(hdr.ack, c->snd.last) > 0 || seqdiff(hdr.ack, c->snd.una) < 0)) {
1157 debug("Packet ack seqno out of range, %u <= %u < %u\n", c->snd.una, hdr.ack, c->snd.una + c->sndbuf.used);
1159 // Ignore unacceptable RST packets.
1167 // 2. Handle RST packets
1172 if(!(hdr.ctl & ACK)) {
1176 // The peer has refused our connection.
1177 set_state(c, CLOSED);
1178 errno = ECONNREFUSED;
1181 c->recv(c, NULL, 0);
1191 // We haven't told the application about this connection yet. Silently delete.
1203 // The peer has aborted our connection.
1204 set_state(c, CLOSED);
1208 c->recv(c, NULL, 0);
1220 // As far as the application is concerned, the connection has already been closed.
1221 // If it has called utcp_close() already, we can immediately free this connection.
1227 // Otherwise, immediately move to the CLOSED state.
1228 set_state(c, CLOSED);
1241 if(!(hdr.ctl & ACK)) {
1246 // 3. Advance snd.una
1248 advanced = seqdiff(hdr.ack, c->snd.una);
1249 prevrcvnxt = c->rcv.nxt;
1253 if(c->rtt_start.tv_sec) {
1254 if(c->rtt_seq == hdr.ack) {
1255 struct timeval now, diff;
1256 gettimeofday(&now, NULL);
1257 timersub(&now, &c->rtt_start, &diff);
1258 update_rtt(c, diff.tv_sec * 1000000 + diff.tv_usec);
1259 c->rtt_start.tv_sec = 0;
1260 } else if(c->rtt_seq < hdr.ack) {
1261 debug("Cancelling RTT measurement: %u < %u\n", c->rtt_seq, hdr.ack);
1262 c->rtt_start.tv_sec = 0;
1266 int32_t data_acked = advanced;
1274 // TODO: handle FIN as well.
1279 assert(data_acked >= 0);
1281 int32_t bufused = seqdiff(c->snd.last, c->snd.una);
1282 assert(data_acked <= bufused);
1285 buffer_get(&c->sndbuf, NULL, data_acked);
1288 // Also advance snd.nxt if possible
1289 if(seqdiff(c->snd.nxt, hdr.ack) < 0) {
1290 c->snd.nxt = hdr.ack;
1293 c->snd.una = hdr.ack;
1296 c->snd.cwnd += utcp->mtu;
1298 if(c->snd.cwnd > c->sndbuf.maxsize) {
1299 c->snd.cwnd = c->sndbuf.maxsize;
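// Congestion control here is deliberately minimal: grow cwnd by one MTU for
// every ACK that advances snd.una (clamped to the send buffer size above) and
// fall back to a single MTU on retransmission or after three duplicate ACKs.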
1302 // Check if we have sent a FIN that is now ACKed.
1305 if(c->snd.una == c->snd.last) {
1306 set_state(c, FIN_WAIT_2);
1312 if(c->snd.una == c->snd.last) {
1313 gettimeofday(&c->conn_timeout, NULL);
1314 c->conn_timeout.tv_sec += 60;
1315 set_state(c, TIME_WAIT);
1324 if(!len && is_reliable(c)) {
1327 if(c->dupack == 3) {
1328 debug("Triplicate ACK\n");
1329 //TODO: Resend one packet and go to fast recovery mode. See RFC 6582.
1330 //We do a very simple variant here; reset the nxt pointer to the last acknowledged packet from the peer.
1331 //Reset the congestion window so we wait for ACKs.
1332 c->snd.nxt = c->snd.una;
1333 c->snd.cwnd = utcp->mtu;
1334 start_retransmit_timer(c);
1342 if(c->snd.una == c->snd.last) {
1343 stop_retransmit_timer(c);
1344 timerclear(&c->conn_timeout);
1345 } else if(is_reliable(c)) {
1346 start_retransmit_timer(c);
1347 gettimeofday(&c->conn_timeout, NULL);
1348 c->conn_timeout.tv_sec += utcp->timeout;
1353 // 5. Process SYN stuff
1359 // This is a SYNACK. It should always have ACKed the SYN.
1364 c->rcv.irs = hdr.seq;
1365 c->rcv.nxt = hdr.seq;
1369 set_state(c, FIN_WAIT_1);
1371 set_state(c, ESTABLISHED);
1374 // TODO: notify application of this somehow.
1385 // Ehm, no. We should never receive a second SYN.
1395 // SYN counts as one sequence number
1399 // 6. Process new data
1401 if(c->state == SYN_RECEIVED) {
1402 // This is the ACK after the SYNACK. It should always have ACKed the SYNACK.
1407 // Are we still LISTENing?
1409 utcp->accept(c, c->src);
1412 if(c->state != ESTABLISHED) {
1413 set_state(c, CLOSED);
1423 // This should never happen.
// Ehm, no. We should never receive more data after a FIN.
1448 handle_incoming_data(c, hdr.seq, ptr, len);
1451 // 7. Process FIN stuff
1453 if((hdr.ctl & FIN) && hdr.seq + len == c->rcv.nxt) {
1457 // This should never happen.
1464 set_state(c, CLOSE_WAIT);
1468 set_state(c, CLOSING);
1472 gettimeofday(&c->conn_timeout, NULL);
1473 c->conn_timeout.tv_sec += 60;
1474 set_state(c, TIME_WAIT);
1481 // Ehm, no. We should never receive a second FIN.
1491 // FIN counts as one sequence number
1495 // Inform the application that the peer closed the connection.
1498 c->recv(c, NULL, 0);
1502 // Now we send something back if:
1503 // - we advanced rcv.nxt (ie, we got some data that needs to be ACKed)
1504 // -> sendatleastone = true
1505 // - or we got an ack, so we should maybe send a bit more data
1506 // -> sendatleastone = false
1508 ack(c, len || prevrcvnxt != c->rcv.nxt);
1520 hdr.ack = hdr.seq + len;
1522 hdr.ctl = RST | ACK;
1525 print_packet(utcp, "send", &hdr, sizeof(hdr));
1526 utcp->send(utcp, &hdr, sizeof(hdr));
1531 int utcp_shutdown(struct utcp_connection *c, int dir) {
1532 debug("%p shutdown %d at %u\n", c ? c->utcp : NULL, dir, c ? c->snd.last : 0);
1540 debug("Error: shutdown() called on closed connection %p\n", c);
1545 if(!(dir == UTCP_SHUT_RD || dir == UTCP_SHUT_WR || dir == UTCP_SHUT_RDWR)) {
1550 // TCP does not have a provision for stopping incoming packets.
1551 // The best we can do is to just ignore them.
1552 if(dir == UTCP_SHUT_RD || dir == UTCP_SHUT_RDWR) {
1556 // The rest of the code deals with shutting down writes.
1557 if(dir == UTCP_SHUT_RD) {
1561 // Only process shutting down writes once.
1579 set_state(c, FIN_WAIT_1);
1587 set_state(c, CLOSING);
1600 if(!timerisset(&c->rtrx_timeout)) {
1601 start_retransmit_timer(c);
1607 static bool reset_connection(struct utcp_connection *c) {
1614 debug("Error: abort() called on closed connection %p\n", c);
1631 set_state(c, CLOSED);
1639 set_state(c, CLOSED);
1649 hdr.seq = c->snd.nxt;
1654 print_packet(c->utcp, "send", &hdr, sizeof(hdr));
1655 c->utcp->send(c->utcp, &hdr, sizeof(hdr));
// Resets all open connections, notifying the application of each one.
1660 void utcp_abort_all_connections(struct utcp *utcp) {
1666 for(int i = 0; i < utcp->nconnections; i++) {
1667 struct utcp_connection *c = utcp->connections[i];
1669 if(c->reapable || c->state == CLOSED) {
1673 utcp_recv_t old_recv = c->recv;
1675 reset_connection(c);
1679 old_recv(c, NULL, 0);
1686 int utcp_close(struct utcp_connection *c) {
1687 if(utcp_shutdown(c, SHUT_RDWR) && errno != ENOTCONN) {
1697 int utcp_abort(struct utcp_connection *c) {
1698 if(!reset_connection(c)) {
1707 * One call to this function will loop through all connections,
1708 * checking if something needs to be resent or not.
 * The return value is a struct timeval holding the time until the next
 * timeout (at most one hour when nothing is pending).
1712 struct timeval utcp_timeout(struct utcp *utcp) {
1714 gettimeofday(&now, NULL);
1715 struct timeval next = {now.tv_sec + 3600, now.tv_usec};
1717 for(int i = 0; i < utcp->nconnections; i++) {
1718 struct utcp_connection *c = utcp->connections[i];
1724 // delete connections that have been utcp_close()d.
1725 if(c->state == CLOSED) {
1727 debug("Reaping %p\n", c);
1735 if(timerisset(&c->conn_timeout) && timercmp(&c->conn_timeout, &now, <)) {
1740 c->recv(c, NULL, 0);
1750 if(timerisset(&c->rtrx_timeout) && timercmp(&c->rtrx_timeout, &now, <)) {
1751 debug("retransmit()\n");
1756 if((c->state == ESTABLISHED || c->state == CLOSE_WAIT)) {
1757 uint32_t len = buffer_free(&c->sndbuf);
1762 } else if(c->state == CLOSED) {
1767 if(timerisset(&c->conn_timeout) && timercmp(&c->conn_timeout, &next, <)) {
1768 next = c->conn_timeout;
1771 if(timerisset(&c->rtrx_timeout) && timercmp(&c->rtrx_timeout, &next, <)) {
1772 next = c->rtrx_timeout;
1776 struct timeval diff;
1778 timersub(&next, &now, &diff);
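/* Typical use in an event loop (illustrative only; fds, fd and buf are assumed
 * to be the caller's pollfd array, UDP socket and packet buffer):
 *
 *   struct timeval tv = utcp_timeout(utcp);
 *   int ms = tv.tv_sec * 1000 + tv.tv_usec / 1000;
 *   if(poll(fds, nfds, ms) > 0 && (fds[0].revents & POLLIN)) {
 *       ssize_t n = recv(fd, buf, sizeof(buf), 0);
 *       if(n > 0)
 *           utcp_recv(utcp, buf, n);  // one UTCP packet per datagram
 *   }
 */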
1783 bool utcp_is_active(struct utcp *utcp) {
1788 for(int i = 0; i < utcp->nconnections; i++)
1789 if(utcp->connections[i]->state != CLOSED && utcp->connections[i]->state != TIME_WAIT) {
1796 struct utcp *utcp_init(utcp_accept_t accept, utcp_pre_accept_t pre_accept, utcp_send_t send, void *priv) {
1802 struct utcp *utcp = calloc(1, sizeof(*utcp));
1808 utcp->accept = accept;
1809 utcp->pre_accept = pre_accept;
1812 utcp->mtu = DEFAULT_MTU;
1813 utcp->timeout = DEFAULT_USER_TIMEOUT; // sec
1814 utcp->rto = START_RTO; // usec
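/* Sketch of bringing up a UTCP instance (illustrative only; on_accept, on_recv
 * and do_send are hypothetical application callbacks matching utcp_accept_t,
 * utcp_recv_t and utcp_send_t):
 *
 *   struct utcp *u = utcp_init(on_accept, NULL, do_send, userdata);
 *   struct utcp_connection *c = utcp_connect(u, 1234, on_recv, userdata);
 *   utcp_send(c, "hello", 5);  // queued until the handshake completes
 *
 * do_send() is handed raw packets for the underlying transport, and packets
 * arriving from that transport are fed back in through utcp_recv(). */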
1819 void utcp_exit(struct utcp *utcp) {
1824 for(int i = 0; i < utcp->nconnections; i++) {
1825 struct utcp_connection *c = utcp->connections[i];
1829 c->recv(c, NULL, 0);
1832 buffer_exit(&c->rcvbuf);
1833 buffer_exit(&c->sndbuf);
1837 free(utcp->connections);
1841 uint16_t utcp_get_mtu(struct utcp *utcp) {
1842 return utcp ? utcp->mtu : 0;
1845 void utcp_set_mtu(struct utcp *utcp, uint16_t mtu) {
1846 // TODO: handle overhead of the header
1852 void utcp_reset_timers(struct utcp *utcp) {
1857 struct timeval now, then;
1859 gettimeofday(&now, NULL);
1863 then.tv_sec += utcp->timeout;
1865 for(int i = 0; i < utcp->nconnections; i++) {
1866 struct utcp_connection *c = utcp->connections[i];
1872 c->rtrx_timeout = now;
1873 c->conn_timeout = then;
1874 c->rtt_start.tv_sec = 0;
1877 if(utcp->rto > START_RTO) {
1878 utcp->rto = START_RTO;
1882 int utcp_get_user_timeout(struct utcp *u) {
1883 return u ? u->timeout : 0;
1886 void utcp_set_user_timeout(struct utcp *u, int timeout) {
1888 u->timeout = timeout;
1892 size_t utcp_get_sndbuf(struct utcp_connection *c) {
1893 return c ? c->sndbuf.maxsize : 0;
1896 size_t utcp_get_sndbuf_free(struct utcp_connection *c) {
1906 return buffer_free(&c->sndbuf);
1913 void utcp_set_sndbuf(struct utcp_connection *c, size_t size) {
1918 c->sndbuf.maxsize = size;
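// If the requested size does not fit in the maxsize field, saturate it at the
// field's maximum; utcp_set_rcvbuf() below does the same for the receive buffer.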
1920 if(c->sndbuf.maxsize != size) {
1921 c->sndbuf.maxsize = -1;
1925 size_t utcp_get_rcvbuf(struct utcp_connection *c) {
1926 return c ? c->rcvbuf.maxsize : 0;
1929 size_t utcp_get_rcvbuf_free(struct utcp_connection *c) {
1930 if(c && (c->state == ESTABLISHED || c->state == CLOSE_WAIT)) {
1931 return buffer_free(&c->rcvbuf);
1937 void utcp_set_rcvbuf(struct utcp_connection *c, size_t size) {
1942 c->rcvbuf.maxsize = size;
1944 if(c->rcvbuf.maxsize != size) {
1945 c->rcvbuf.maxsize = -1;
1949 size_t utcp_get_sendq(struct utcp_connection *c) {
1950 return c->sndbuf.used;
1953 size_t utcp_get_recvq(struct utcp_connection *c) {
1954 return c->rcvbuf.used;
1957 bool utcp_get_nodelay(struct utcp_connection *c) {
1958 return c ? c->nodelay : false;
1961 void utcp_set_nodelay(struct utcp_connection *c, bool nodelay) {
1963 c->nodelay = nodelay;
1967 bool utcp_get_keepalive(struct utcp_connection *c) {
1968 return c ? c->keepalive : false;
1971 void utcp_set_keepalive(struct utcp_connection *c, bool keepalive) {
1973 c->keepalive = keepalive;
1977 size_t utcp_get_outq(struct utcp_connection *c) {
1978 return c ? seqdiff(c->snd.nxt, c->snd.una) : 0;
1981 void utcp_set_recv_cb(struct utcp_connection *c, utcp_recv_t recv) {
1987 void utcp_set_poll_cb(struct utcp_connection *c, utcp_poll_t poll) {
1993 void utcp_set_accept_cb(struct utcp *utcp, utcp_accept_t accept, utcp_pre_accept_t pre_accept) {
1995 utcp->accept = accept;
1996 utcp->pre_accept = pre_accept;
2000 void utcp_expect_data(struct utcp_connection *c, bool expect) {
2001 if(!c || c->reapable) {
2005 if(!(c->state == ESTABLISHED || c->state == FIN_WAIT_1 || c->state == FIN_WAIT_2)) {
2010 // If we expect data, start the connection timer.
2011 if(!timerisset(&c->conn_timeout)) {
2012 gettimeofday(&c->conn_timeout, NULL);
2013 c->conn_timeout.tv_sec += c->utcp->timeout;
2016 // If we want to cancel expecting data, only clear the timer when there is no unACKed data.
2017 if(c->snd.una == c->snd.last) {
2018 timerclear(&c->conn_timeout);
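/* Typical pattern (illustrative): call utcp_expect_data(c, true) right after
 * sending a request whose reply must arrive within the user timeout, and
 * utcp_expect_data(c, false) once the reply has arrived, so that an idle but
 * healthy connection is not torn down. */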
2023 void utcp_offline(struct utcp *utcp, bool offline) {
2024 for(int i = 0; i < utcp->nconnections; i++) {
2025 struct utcp_connection *c = utcp->connections[i];
2028 utcp_expect_data(c, offline);
// If we are online again, reset the retransmission timers, but keep the connection timeout
// as it is, so that peers which frequently toggle between online and offline cannot keep
// connections alive without making progress on actual data.
2034 gettimeofday(&utcp->connections[i]->rtrx_timeout, NULL);
2035 utcp->connections[i]->rtt_start.tv_sec = 0;
2040 if(!offline && utcp->rto > START_RTO) {
2041 utcp->rto = START_RTO;