2 utcp.c -- Userspace TCP
3 Copyright (C) 2014-2017 Guus Sliepen <guus@tinc-vpn.org>
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
15 You should have received a copy of the GNU General Public License along
16 with this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
31 #include <sys/socket.h>
33 #include "utcp_priv.h"
48 #define timersub(a, b, r)\
50 (r)->tv_sec = (a)->tv_sec - (b)->tv_sec;\
51 (r)->tv_usec = (a)->tv_usec - (b)->tv_usec;\
53 (r)->tv_sec--, (r)->tv_usec += USEC_PER_SEC;\
57 static inline size_t max(size_t a, size_t b) {
64 static void debug(const char *format, ...) {
67 vfprintf(stderr, format, ap);
71 static void print_packet(struct utcp *utcp, const char *dir, const void *pkt, size_t len) {
74 if(len < sizeof(hdr)) {
75 debug("%p %s: short packet (%lu bytes)\n", utcp, dir, (unsigned long)len);
79 memcpy(&hdr, pkt, sizeof(hdr));
80 debug("%p %s: len=%lu, src=%u dst=%u seq=%u ack=%u wnd=%u aux=%x ctl=", utcp, dir, (unsigned long)len, hdr.src, hdr.dst, hdr.seq, hdr.ack, hdr.wnd, hdr.aux);
98 if(len > sizeof(hdr)) {
99 uint32_t datalen = len - sizeof(hdr);
100 const uint8_t *data = (uint8_t *)pkt + sizeof(hdr);
101 char str[datalen * 2 + 1];
104 for(uint32_t i = 0; i < datalen; i++) {
105 *p++ = "0123456789ABCDEF"[data[i] >> 4];
106 *p++ = "0123456789ABCDEF"[data[i] & 15];
111 debug(" data=%s", str);
117 #define debug(...) do {} while(0)
118 #define print_packet(...) do {} while(0)
121 static void set_state(struct utcp_connection *c, enum state state) {
124 if(state == ESTABLISHED) {
125 timerclear(&c->conn_timeout);
128 debug("%p new state: %s\n", c->utcp, strstate[state]);
131 static bool fin_wanted(struct utcp_connection *c, uint32_t seq) {
132 if(seq != c->snd.last) {
147 static bool is_reliable(struct utcp_connection *c) {
148 return c->flags & UTCP_RELIABLE;
151 static int32_t seqdiff(uint32_t a, uint32_t b) {
156 static bool buffer_wraps(struct buffer *buf) {
157 return buf->size - buf->offset < buf->used;
160 static bool buffer_resize(struct buffer *buf, uint32_t newsize) {
161 char *newdata = realloc(buf->data, newsize);
169 if(buffer_wraps(buf)) {
170 // Shift the right part of the buffer until it hits the end of the new buffer.
174 // [345.........|........012]
175 uint32_t tailsize = buf->size - buf->offset;
176 uint32_t newoffset = newsize - tailsize;
177 memmove(buf + newoffset, buf + buf->offset, tailsize);
178 buf->offset = newoffset;
185 // Store data into the buffer
186 static ssize_t buffer_put_at(struct buffer *buf, size_t offset, const void *data, size_t len) {
187 debug("buffer_put_at %lu %lu %lu\n", (unsigned long)buf->used, (unsigned long)offset, (unsigned long)len);
189 // Ensure we don't store more than maxsize bytes in total
190 size_t required = offset + len;
192 if(required > buf->maxsize) {
193 if(offset >= buf->maxsize) {
197 len = buf->maxsize - offset;
198 required = buf->maxsize;
201 // Check if we need to resize the buffer
202 if(required > buf->size) {
203 size_t newsize = buf->size;
211 } while(newsize < required);
213 if(newsize > buf->maxsize) {
214 newsize = buf->maxsize;
217 if(!buffer_resize(buf, newsize)) {
222 uint32_t realoffset = buf->offset + offset;
224 if(buf->size - buf->offset < offset) {
225 // The offset wrapped
226 realoffset -= buf->size;
229 if(buf->size - realoffset < len) {
230 // The new chunk of data must be wrapped
231 memcpy(buf->data + realoffset, data, buf->size - realoffset);
232 memcpy(buf->data, (char *)data + buf->size - realoffset, len - (buf->size - realoffset));
234 memcpy(buf->data + realoffset, data, len);
237 if(required > buf->used) {
238 buf->used = required;
244 static ssize_t buffer_put(struct buffer *buf, const void *data, size_t len) {
245 return buffer_put_at(buf, buf->used, data, len);
248 // Copy data from the buffer without removing it.
249 static ssize_t buffer_copy(struct buffer *buf, void *data, size_t offset, size_t len) {
250 // Ensure we don't copy more than is actually stored in the buffer
251 if(offset >= buf->used) {
255 if(buf->used - offset < len) {
256 len = buf->used - offset;
259 uint32_t realoffset = buf->offset + offset;
261 if(buf->size - buf->offset < offset) {
262 // The offset wrapped
263 realoffset -= buf->size;
266 if(buf->size - realoffset < len) {
267 // The data is wrapped
268 memcpy(data, buf->data + realoffset, buf->size - realoffset);
269 memcpy((char *)data + buf->size - realoffset, buf->data, len - (buf->size - realoffset));
271 memcpy(data, buf->data + realoffset, len);
277 // Get data from the buffer.
278 static ssize_t buffer_get(struct buffer *buf, void *data, size_t len) {
279 len = buffer_copy(buf, data, 0, len);
281 if(buf->size - buf->offset < len) {
282 buf->offset -= buf->size;
290 // Discard data from the buffer.
291 static ssize_t buffer_discard(struct buffer *buf, size_t len) {
292 if(buf->used < len) {
296 if(buf->size - buf->offset < len) {
297 buf->offset -= buf->size;
306 static bool buffer_set_size(struct buffer *buf, uint32_t minsize, uint32_t maxsize) {
307 if(maxsize < minsize) {
311 buf->maxsize = maxsize;
313 return buf->size >= minsize || buffer_resize(buf, minsize);
316 static void buffer_exit(struct buffer *buf) {
318 memset(buf, 0, sizeof(*buf));
321 static uint32_t buffer_free(const struct buffer *buf) {
322 return buf->maxsize - buf->used;
325 // Connections are stored in a sorted list.
326 // This gives O(log(N)) lookup time, O(N log(N)) insertion time and O(N) deletion time.
328 static int compare(const void *va, const void *vb) {
331 const struct utcp_connection *a = *(struct utcp_connection **)va;
332 const struct utcp_connection *b = *(struct utcp_connection **)vb;
335 assert(a->src && b->src);
337 int c = (int)a->src - (int)b->src;
343 c = (int)a->dst - (int)b->dst;
347 static struct utcp_connection *find_connection(const struct utcp *utcp, uint16_t src, uint16_t dst) {
348 if(!utcp->nconnections) {
352 struct utcp_connection key = {
356 struct utcp_connection **match = bsearch(&keyp, utcp->connections, utcp->nconnections, sizeof(*utcp->connections), compare);
357 return match ? *match : NULL;
360 static void free_connection(struct utcp_connection *c) {
361 struct utcp *utcp = c->utcp;
362 struct utcp_connection **cp = bsearch(&c, utcp->connections, utcp->nconnections, sizeof(*utcp->connections), compare);
366 int i = cp - utcp->connections;
367 memmove(cp, cp + 1, (utcp->nconnections - i - 1) * sizeof(*cp));
368 utcp->nconnections--;
370 buffer_exit(&c->rcvbuf);
371 buffer_exit(&c->sndbuf);
375 static struct utcp_connection *allocate_connection(struct utcp *utcp, uint16_t src, uint16_t dst) {
376 // Check whether this combination of src and dst is free
379 if(find_connection(utcp, src, dst)) {
383 } else { // If src == 0, generate a random port number with the high bit set
384 if(utcp->nconnections >= 32767) {
389 src = rand() | 0x8000;
391 while(find_connection(utcp, src, dst)) {
396 // Allocate memory for the new connection
398 if(utcp->nconnections >= utcp->nallocated) {
399 if(!utcp->nallocated) {
400 utcp->nallocated = 4;
402 utcp->nallocated *= 2;
405 struct utcp_connection **new_array = realloc(utcp->connections, utcp->nallocated * sizeof(*utcp->connections));
411 utcp->connections = new_array;
414 struct utcp_connection *c = calloc(1, sizeof(*c));
420 if(!buffer_set_size(&c->sndbuf, DEFAULT_SNDBUFSIZE, DEFAULT_MAXSNDBUFSIZE)) {
425 if(!buffer_set_size(&c->rcvbuf, DEFAULT_RCVBUFSIZE, DEFAULT_MAXRCVBUFSIZE)) {
426 buffer_exit(&c->sndbuf);
431 // Fill in the details
440 c->snd.una = c->snd.iss;
441 c->snd.nxt = c->snd.iss + 1;
442 c->rcv.wnd = utcp->mtu;
443 c->snd.last = c->snd.nxt;
444 c->snd.cwnd = utcp->mtu;
447 // Add it to the sorted list of connections
449 utcp->connections[utcp->nconnections++] = c;
450 qsort(utcp->connections, utcp->nconnections, sizeof(*utcp->connections), compare);
455 static inline uint32_t absdiff(uint32_t a, uint32_t b) {
463 // Update RTT variables. See RFC 6298.
464 static void update_rtt(struct utcp_connection *c, uint32_t rtt) {
466 debug("invalid rtt\n");
470 struct utcp *utcp = c->utcp;
474 utcp->rttvar = rtt / 2;
476 utcp->rttvar = (utcp->rttvar * 3 + absdiff(utcp->srtt, rtt)) / 4;
477 utcp->srtt = (utcp->srtt * 7 + rtt) / 8;
480 utcp->rto = utcp->srtt + max(4 * utcp->rttvar, CLOCK_GRANULARITY);
482 if(utcp->rto > MAX_RTO) {
486 debug("rtt %u srtt %u rttvar %u rto %u\n", rtt, utcp->srtt, utcp->rttvar, utcp->rto);
489 static void start_retransmit_timer(struct utcp_connection *c) {
490 gettimeofday(&c->rtrx_timeout, NULL);
491 c->rtrx_timeout.tv_usec += c->utcp->rto;
493 while(c->rtrx_timeout.tv_usec >= 1000000) {
494 c->rtrx_timeout.tv_usec -= 1000000;
495 c->rtrx_timeout.tv_sec++;
498 debug("timeout set to %lu.%06lu (%u)\n", c->rtrx_timeout.tv_sec, c->rtrx_timeout.tv_usec, c->utcp->rto);
501 static void stop_retransmit_timer(struct utcp_connection *c) {
502 timerclear(&c->rtrx_timeout);
503 debug("timeout cleared\n");
506 struct utcp_connection *utcp_connect_ex(struct utcp *utcp, uint16_t dst, utcp_recv_t recv, void *priv, uint32_t flags) {
507 struct utcp_connection *c = allocate_connection(utcp, 0, dst);
513 assert((flags & ~0x1f) == 0);
524 pkt.hdr.src = c->src;
525 pkt.hdr.dst = c->dst;
526 pkt.hdr.seq = c->snd.iss;
528 pkt.hdr.wnd = c->rcv.wnd;
530 pkt.hdr.aux = 0x0101;
534 pkt.init[3] = flags & 0x7;
536 set_state(c, SYN_SENT);
538 print_packet(utcp, "send", &pkt, sizeof(pkt));
539 utcp->send(utcp, &pkt, sizeof(pkt));
541 gettimeofday(&c->conn_timeout, NULL);
542 c->conn_timeout.tv_sec += utcp->timeout;
544 start_retransmit_timer(c);
549 struct utcp_connection *utcp_connect(struct utcp *utcp, uint16_t dst, utcp_recv_t recv, void *priv) {
550 return utcp_connect_ex(utcp, dst, recv, priv, UTCP_TCP);
553 void utcp_accept(struct utcp_connection *c, utcp_recv_t recv, void *priv) {
554 if(c->reapable || c->state != SYN_RECEIVED) {
555 debug("Error: accept() called on invalid connection %p in state %s\n", c, strstate[c->state]);
559 debug("%p accepted, %p %p\n", c, recv, priv);
562 set_state(c, ESTABLISHED);
565 static void ack(struct utcp_connection *c, bool sendatleastone) {
566 int32_t left = seqdiff(c->snd.last, c->snd.nxt);
567 int32_t cwndleft = c->snd.cwnd - seqdiff(c->snd.nxt, c->snd.una);
568 debug("cwndleft = %d\n", cwndleft);
576 if(cwndleft < left) {
580 if(!left && !sendatleastone) {
587 } *pkt = c->utcp->pkt;
589 pkt->hdr.src = c->src;
590 pkt->hdr.dst = c->dst;
591 pkt->hdr.ack = c->rcv.nxt;
592 pkt->hdr.wnd = c->snd.wnd;
597 uint32_t seglen = left > c->utcp->mtu ? c->utcp->mtu : left;
598 pkt->hdr.seq = c->snd.nxt;
600 buffer_copy(&c->sndbuf, pkt->data, seqdiff(c->snd.nxt, c->snd.una), seglen);
602 c->snd.nxt += seglen;
605 if(seglen && fin_wanted(c, c->snd.nxt)) {
610 if(!c->rtt_start.tv_sec) {
611 // Start RTT measurement
612 gettimeofday(&c->rtt_start, NULL);
613 c->rtt_seq = pkt->hdr.seq + seglen;
614 debug("Starting RTT measurement, expecting ack %u\n", c->rtt_seq);
617 print_packet(c->utcp, "send", pkt, sizeof(pkt->hdr) + seglen);
618 c->utcp->send(c->utcp, pkt, sizeof(pkt->hdr) + seglen);
622 ssize_t utcp_send(struct utcp_connection *c, const void *data, size_t len) {
624 debug("Error: send() called on closed connection %p\n", c);
632 debug("Error: send() called on unconnected connection %p\n", c);
647 debug("Error: send() called on closing connection %p\n", c);
652 // Exit early if we have nothing to send.
663 // Check if we need to be able to buffer all data
665 if(c->flags & UTCP_NO_PARTIAL) {
666 if(len > buffer_free(&c->sndbuf)) {
667 if(len > c->sndbuf.maxsize) {
677 // Add data to send buffer.
679 if(is_reliable(c) || (c->state != SYN_SENT && c->state != SYN_RECEIVED)) {
680 len = buffer_put(&c->sndbuf, data, len);
696 // Don't send anything yet if the connection has not fully established yet
698 if(c->state == SYN_SENT || c->state == SYN_RECEIVED) {
704 if(!is_reliable(c)) {
705 c->snd.una = c->snd.nxt = c->snd.last;
706 buffer_get(&c->sndbuf, NULL, c->sndbuf.used);
709 if(is_reliable(c) && !timerisset(&c->rtrx_timeout)) {
710 start_retransmit_timer(c);
713 if(is_reliable(c) && !timerisset(&c->conn_timeout)) {
714 gettimeofday(&c->conn_timeout, NULL);
715 c->conn_timeout.tv_sec += c->utcp->timeout;
721 static void swap_ports(struct hdr *hdr) {
722 uint16_t tmp = hdr->src;
727 static void retransmit(struct utcp_connection *c) {
728 if(c->state == CLOSED || c->snd.last == c->snd.una) {
729 debug("Retransmit() called but nothing to retransmit!\n");
730 stop_retransmit_timer(c);
734 struct utcp *utcp = c->utcp;
739 } *pkt = c->utcp->pkt;
741 pkt->hdr.src = c->src;
742 pkt->hdr.dst = c->dst;
743 pkt->hdr.wnd = c->rcv.wnd;
748 // Send our SYN again
749 pkt->hdr.seq = c->snd.iss;
752 pkt->hdr.aux = 0x0101;
756 pkt->data[3] = c->flags & 0x7;
757 print_packet(c->utcp, "rtrx", pkt, sizeof(pkt->hdr) + 4);
758 utcp->send(utcp, pkt, sizeof(pkt->hdr) + 4);
763 pkt->hdr.seq = c->snd.nxt;
764 pkt->hdr.ack = c->rcv.nxt;
765 pkt->hdr.ctl = SYN | ACK;
766 print_packet(c->utcp, "rtrx", pkt, sizeof(pkt->hdr));
767 utcp->send(utcp, pkt, sizeof(pkt->hdr));
775 // Send unacked data again.
776 pkt->hdr.seq = c->snd.una;
777 pkt->hdr.ack = c->rcv.nxt;
779 uint32_t len = seqdiff(c->snd.last, c->snd.una);
781 if(len > utcp->mtu) {
785 if(fin_wanted(c, c->snd.una + len)) {
790 c->snd.nxt = c->snd.una + len;
791 c->snd.cwnd = utcp->mtu; // reduce cwnd on retransmit
792 buffer_copy(&c->sndbuf, pkt->data, 0, len);
793 print_packet(c->utcp, "rtrx", pkt, sizeof(pkt->hdr) + len);
794 utcp->send(utcp, pkt, sizeof(pkt->hdr) + len);
801 // We shouldn't need to retransmit anything in this state.
805 stop_retransmit_timer(c);
809 start_retransmit_timer(c);
812 if(utcp->rto > MAX_RTO) {
816 c->rtt_start.tv_sec = 0; // invalidate RTT timer
822 /* Update receive buffer and SACK entries after consuming data.
826 * |.....0000..1111111111.....22222......3333|
829 * 0..3 represent the SACK entries. The ^ indicates up to which point we want
830 * to remove data from the receive buffer. The idea is to subtract "len"
831 * from the offset of all the SACK entries, and then remove/cut down entries
832 * that are shifted to before the start of the receive buffer.
834 * There are three cases:
835 * - the SACK entry is after ^, in that case just change the offset.
836 * - the SACK entry starts before and ends after ^, so we have to
837 * change both its offset and size.
838 * - the SACK entry is completely before ^, in that case delete it.
840 static void sack_consume(struct utcp_connection *c, size_t len) {
841 debug("sack_consume %lu\n", (unsigned long)len);
843 if(len > c->rcvbuf.used) {
844 debug("All SACK entries consumed");
849 buffer_get(&c->rcvbuf, NULL, len);
851 for(int i = 0; i < NSACKS && c->sacks[i].len;) {
852 if(len < c->sacks[i].offset) {
853 c->sacks[i].offset -= len;
855 } else if(len < c->sacks[i].offset + c->sacks[i].len) {
856 c->sacks[i].len -= len - c->sacks[i].offset;
857 c->sacks[i].offset = 0;
861 memmove(&c->sacks[i], &c->sacks[i + 1], (NSACKS - 1 - i) * sizeof(c->sacks)[i]);
862 c->sacks[NSACKS - 1].len = 0;
870 for(int i = 0; i < NSACKS && c->sacks[i].len; i++) {
871 debug("SACK[%d] offset %u len %u\n", i, c->sacks[i].offset, c->sacks[i].len);
875 static void handle_out_of_order(struct utcp_connection *c, uint32_t offset, const void *data, size_t len) {
876 debug("out of order packet, offset %u\n", offset);
877 // Packet loss or reordering occured. Store the data in the buffer.
878 ssize_t rxd = buffer_put_at(&c->rcvbuf, offset, data, len);
880 if(rxd < 0 || (size_t)rxd < len) {
884 // Make note of where we put it.
885 for(int i = 0; i < NSACKS; i++) {
886 if(!c->sacks[i].len) { // nothing to merge, add new entry
887 debug("New SACK entry %d\n", i);
888 c->sacks[i].offset = offset;
889 c->sacks[i].len = rxd;
891 } else if(offset < c->sacks[i].offset) {
892 if(offset + rxd < c->sacks[i].offset) { // insert before
893 if(!c->sacks[NSACKS - 1].len) { // only if room left
894 debug("Insert SACK entry at %d\n", i);
895 memmove(&c->sacks[i + 1], &c->sacks[i], (NSACKS - i - 1) * sizeof(c->sacks)[i]);
896 c->sacks[i].offset = offset;
897 c->sacks[i].len = rxd;
899 debug("SACK entries full, dropping packet\n");
904 debug("Merge with start of SACK entry at %d\n", i);
905 c->sacks[i].offset = offset;
908 } else if(offset <= c->sacks[i].offset + c->sacks[i].len) {
909 if(offset + rxd > c->sacks[i].offset + c->sacks[i].len) { // merge
910 debug("Merge with end of SACK entry at %d\n", i);
911 c->sacks[i].len = offset + rxd - c->sacks[i].offset;
912 // TODO: handle potential merge with next entry
919 for(int i = 0; i < NSACKS && c->sacks[i].len; i++) {
920 debug("SACK[%d] offset %u len %u\n", i, c->sacks[i].offset, c->sacks[i].len);
924 static void handle_in_order(struct utcp_connection *c, const void *data, size_t len) {
925 // Check if we can process out-of-order data now.
926 if(c->sacks[0].len && len >= c->sacks[0].offset) { // TODO: handle overlap with second SACK
927 debug("incoming packet len %lu connected with SACK at %u\n", (unsigned long)len, c->sacks[0].offset);
928 buffer_put_at(&c->rcvbuf, 0, data, len); // TODO: handle return value
929 len = max(len, c->sacks[0].offset + c->sacks[0].len);
930 data = c->rcvbuf.data;
934 ssize_t rxd = c->recv(c, data, len);
936 if(rxd < 0 || (size_t)rxd != len) {
937 // TODO: handle the application not accepting all data.
943 sack_consume(c, len);
950 static void handle_incoming_data(struct utcp_connection *c, uint32_t seq, const void *data, size_t len) {
951 if(!is_reliable(c)) {
952 c->recv(c, data, len);
953 c->rcv.nxt = seq + len;
957 uint32_t offset = seqdiff(seq, c->rcv.nxt);
959 if(offset + len > c->rcvbuf.maxsize) {
964 handle_out_of_order(c, offset, data, len);
966 handle_in_order(c, data, len);
971 ssize_t utcp_recv(struct utcp *utcp, const void *data, size_t len) {
972 const uint8_t *ptr = data;
988 print_packet(utcp, "recv", data, len);
990 // Drop packets smaller than the header
994 if(len < sizeof(hdr)) {
999 // Make a copy from the potentially unaligned data to a struct hdr
1001 memcpy(&hdr, ptr, sizeof(hdr));
1005 // Drop packets with an unknown CTL flag
1007 if(hdr.ctl & ~(SYN | ACK | RST | FIN)) {
1012 // Check for auxiliary headers
1014 const uint8_t *init = NULL;
1016 uint16_t aux = hdr.aux;
1019 size_t auxlen = 4 * (aux >> 8) & 0xf;
1020 uint8_t auxtype = aux & 0xff;
1029 if(!(hdr.ctl & SYN) || auxlen != 4) {
1045 if(!(aux & 0x800)) {
1054 memcpy(&aux, ptr, 2);
1059 bool has_data = len || (hdr.ctl & (SYN | FIN));
1061 // Try to match the packet to an existing connection
1063 struct utcp_connection *c = find_connection(utcp, hdr.dst, hdr.src);
1065 // Is it for a new connection?
1068 // Ignore RST packets
1074 // Is it a SYN packet and are we LISTENing?
1076 if(hdr.ctl & SYN && !(hdr.ctl & ACK) && utcp->accept) {
1077 // If we don't want to accept it, send a RST back
1078 if((utcp->pre_accept && !utcp->pre_accept(utcp, hdr.dst))) {
1083 // Try to allocate memory, otherwise send a RST back
1084 c = allocate_connection(utcp, hdr.dst, hdr.src);
1091 // Parse auxilliary information
1098 c->flags = init[3] & 0x7;
1100 c->flags = UTCP_TCP;
1104 // Return SYN+ACK, go to SYN_RECEIVED state
1105 c->snd.wnd = hdr.wnd;
1106 c->rcv.irs = hdr.seq;
1107 c->rcv.nxt = c->rcv.irs + 1;
1108 set_state(c, SYN_RECEIVED);
1115 pkt.hdr.src = c->src;
1116 pkt.hdr.dst = c->dst;
1117 pkt.hdr.ack = c->rcv.irs + 1;
1118 pkt.hdr.seq = c->snd.iss;
1119 pkt.hdr.wnd = c->rcv.wnd;
1120 pkt.hdr.ctl = SYN | ACK;
1123 pkt.hdr.aux = 0x0101;
1127 pkt.data[3] = c->flags & 0x7;
1128 print_packet(c->utcp, "send", &pkt, sizeof(hdr) + 4);
1129 utcp->send(utcp, &pkt, sizeof(hdr) + 4);
1132 print_packet(c->utcp, "send", &pkt, sizeof(hdr));
1133 utcp->send(utcp, &pkt, sizeof(hdr));
1136 // No, we don't want your packets, send a RST back
1144 debug("%p state %s\n", c->utcp, strstate[c->state]);
1146 // In case this is for a CLOSED connection, ignore the packet.
1147 // TODO: make it so incoming packets can never match a CLOSED connection.
1149 if(c->state == CLOSED) {
1150 debug("Got packet for closed connection\n");
1154 // It is for an existing connection.
1156 // 1. Drop invalid packets.
1158 // 1a. Drop packets that should not happen in our current state.
1179 // 1b. Discard data that is not in our receive window.
1181 if(is_reliable(c)) {
1184 if(c->state == SYN_SENT) {
1186 } else if(len == 0) {
1187 acceptable = seqdiff(hdr.seq, c->rcv.nxt) >= 0;
1189 int32_t rcv_offset = seqdiff(hdr.seq, c->rcv.nxt);
1191 // cut already accepted front overlapping
1192 if(rcv_offset < 0) {
1193 acceptable = len > (size_t) - rcv_offset;
1198 hdr.seq -= rcv_offset;
1201 acceptable = seqdiff(hdr.seq, c->rcv.nxt) >= 0 && seqdiff(hdr.seq, c->rcv.nxt) + len <= c->rcvbuf.maxsize;
1206 debug("Packet not acceptable, %u <= %u + %lu < %u\n", c->rcv.nxt, hdr.seq, (unsigned long)len, c->rcv.nxt + c->rcvbuf.maxsize);
1208 // Ignore unacceptable RST packets.
1213 // Otherwise, continue processing.
1218 c->snd.wnd = hdr.wnd; // TODO: move below
1220 // 1c. Drop packets with an invalid ACK.
1221 // ackno should not roll back, and it should also not be bigger than what we ever could have sent
1222 // (= snd.una + c->sndbuf.used).
1224 if(!is_reliable(c)) {
1225 if(hdr.ack != c->snd.last && c->state >= ESTABLISHED) {
1226 hdr.ack = c->snd.una;
1230 if(hdr.ctl & ACK && (seqdiff(hdr.ack, c->snd.last) > 0 || seqdiff(hdr.ack, c->snd.una) < 0)) {
1231 debug("Packet ack seqno out of range, %u <= %u < %u\n", c->snd.una, hdr.ack, c->snd.una + c->sndbuf.used);
1233 // Ignore unacceptable RST packets.
1241 // 2. Handle RST packets
1246 if(!(hdr.ctl & ACK)) {
1250 // The peer has refused our connection.
1251 set_state(c, CLOSED);
1252 errno = ECONNREFUSED;
1255 c->recv(c, NULL, 0);
1258 if(c->poll && !c->reapable) {
1269 // We haven't told the application about this connection yet. Silently delete.
1281 // The peer has aborted our connection.
1282 set_state(c, CLOSED);
1286 c->recv(c, NULL, 0);
1289 if(c->poll && !c->reapable) {
1302 // As far as the application is concerned, the connection has already been closed.
1303 // If it has called utcp_close() already, we can immediately free this connection.
1309 // Otherwise, immediately move to the CLOSED state.
1310 set_state(c, CLOSED);
1323 if(!(hdr.ctl & ACK)) {
1328 // 3. Advance snd.una
1330 advanced = seqdiff(hdr.ack, c->snd.una);
1334 if(c->rtt_start.tv_sec) {
1335 if(c->rtt_seq == hdr.ack) {
1336 struct timeval now, diff;
1337 gettimeofday(&now, NULL);
1338 timersub(&now, &c->rtt_start, &diff);
1339 update_rtt(c, diff.tv_sec * 1000000 + diff.tv_usec);
1340 c->rtt_start.tv_sec = 0;
1341 } else if(c->rtt_seq < hdr.ack) {
1342 debug("Cancelling RTT measurement: %u < %u\n", c->rtt_seq, hdr.ack);
1343 c->rtt_start.tv_sec = 0;
1347 int32_t data_acked = advanced;
1355 // TODO: handle FIN as well.
1360 assert(data_acked >= 0);
1363 int32_t bufused = seqdiff(c->snd.last, c->snd.una);
1364 assert(data_acked <= bufused);
1368 buffer_discard(&c->sndbuf, data_acked);
1371 // Also advance snd.nxt if possible
1372 if(seqdiff(c->snd.nxt, hdr.ack) < 0) {
1373 c->snd.nxt = hdr.ack;
1376 c->snd.una = hdr.ack;
1379 c->snd.cwnd += utcp->mtu;
1381 if(c->snd.cwnd > c->sndbuf.maxsize) {
1382 c->snd.cwnd = c->sndbuf.maxsize;
1385 // Check if we have sent a FIN that is now ACKed.
1388 if(c->snd.una == c->snd.last) {
1389 set_state(c, FIN_WAIT_2);
1395 if(c->snd.una == c->snd.last) {
1396 gettimeofday(&c->conn_timeout, NULL);
1397 c->conn_timeout.tv_sec += utcp->timeout;
1398 set_state(c, TIME_WAIT);
1407 if(!len && is_reliable(c)) {
1410 if(c->dupack == 3) {
1411 debug("Triplicate ACK\n");
1412 //TODO: Resend one packet and go to fast recovery mode. See RFC 6582.
1413 //We do a very simple variant here; reset the nxt pointer to the last acknowledged packet from the peer.
1414 //Reset the congestion window so we wait for ACKs.
1415 c->snd.nxt = c->snd.una;
1416 c->snd.cwnd = utcp->mtu;
1417 start_retransmit_timer(c);
1425 if(c->snd.una == c->snd.last) {
1426 stop_retransmit_timer(c);
1427 timerclear(&c->conn_timeout);
1428 } else if(is_reliable(c)) {
1429 start_retransmit_timer(c);
1430 gettimeofday(&c->conn_timeout, NULL);
1431 c->conn_timeout.tv_sec += utcp->timeout;
1436 // 5. Process SYN stuff
1442 // This is a SYNACK. It should always have ACKed the SYN.
1447 c->rcv.irs = hdr.seq;
1448 c->rcv.nxt = hdr.seq;
1452 set_state(c, FIN_WAIT_1);
1454 set_state(c, ESTABLISHED);
1457 // TODO: notify application of this somehow.
1461 // This is a retransmit of a SYN, send back the SYNACK.
1471 // Ehm, no. We should never receive a second SYN.
1481 // SYN counts as one sequence number
1485 // 6. Process new data
1487 if(c->state == SYN_RECEIVED) {
1488 // This is the ACK after the SYNACK. It should always have ACKed the SYNACK.
1493 // Are we still LISTENing?
1495 utcp->accept(c, c->src);
1498 if(c->state != ESTABLISHED) {
1499 set_state(c, CLOSED);
1509 // This should never happen.
1524 // Ehm no, We should never receive more data after a FIN.
1534 handle_incoming_data(c, hdr.seq, ptr, len);
1537 // 7. Process FIN stuff
1539 if((hdr.ctl & FIN) && (!is_reliable(c) || hdr.seq + len == c->rcv.nxt)) {
1543 // This should never happen.
1550 set_state(c, CLOSE_WAIT);
1554 set_state(c, CLOSING);
1558 gettimeofday(&c->conn_timeout, NULL);
1559 c->conn_timeout.tv_sec += utcp->timeout;
1560 set_state(c, TIME_WAIT);
1567 // Ehm, no. We should never receive a second FIN.
1577 // FIN counts as one sequence number
1581 // Inform the application that the peer closed its end of the connection.
1584 c->recv(c, NULL, 0);
1588 // Now we send something back if:
1589 // - we received data, so we have to send back an ACK
1590 // -> sendatleastone = true
1591 // - or we got an ack, so we should maybe send a bit more data
1592 // -> sendatleastone = false
1594 if(is_reliable(c) || hdr.ctl & SYN || hdr.ctl & FIN) {
1609 hdr.ack = hdr.seq + len;
1611 hdr.ctl = RST | ACK;
1614 print_packet(utcp, "send", &hdr, sizeof(hdr));
1615 utcp->send(utcp, &hdr, sizeof(hdr));
1620 int utcp_shutdown(struct utcp_connection *c, int dir) {
1621 debug("%p shutdown %d at %u\n", c ? c->utcp : NULL, dir, c ? c->snd.last : 0);
1629 debug("Error: shutdown() called on closed connection %p\n", c);
1634 if(!(dir == UTCP_SHUT_RD || dir == UTCP_SHUT_WR || dir == UTCP_SHUT_RDWR)) {
1639 // TCP does not have a provision for stopping incoming packets.
1640 // The best we can do is to just ignore them.
1641 if(dir == UTCP_SHUT_RD || dir == UTCP_SHUT_RDWR) {
1645 // The rest of the code deals with shutting down writes.
1646 if(dir == UTCP_SHUT_RD) {
1650 // Only process shutting down writes once.
1668 set_state(c, FIN_WAIT_1);
1676 set_state(c, CLOSING);
1689 if(!timerisset(&c->rtrx_timeout)) {
1690 start_retransmit_timer(c);
1696 static bool reset_connection(struct utcp_connection *c) {
1703 debug("Error: abort() called on closed connection %p\n", c);
1720 set_state(c, CLOSED);
1728 set_state(c, CLOSED);
1738 hdr.seq = c->snd.nxt;
1743 print_packet(c->utcp, "send", &hdr, sizeof(hdr));
1744 c->utcp->send(c->utcp, &hdr, sizeof(hdr));
1748 // Closes all the opened connections
1749 void utcp_abort_all_connections(struct utcp *utcp) {
1755 for(int i = 0; i < utcp->nconnections; i++) {
1756 struct utcp_connection *c = utcp->connections[i];
1758 if(c->reapable || c->state == CLOSED) {
1762 utcp_recv_t old_recv = c->recv;
1763 utcp_poll_t old_poll = c->poll;
1765 reset_connection(c);
1769 old_recv(c, NULL, 0);
1772 if(old_poll && !c->reapable) {
1781 int utcp_close(struct utcp_connection *c) {
1782 if(utcp_shutdown(c, SHUT_RDWR) && errno != ENOTCONN) {
1792 int utcp_abort(struct utcp_connection *c) {
1793 if(!reset_connection(c)) {
1802 * One call to this function will loop through all connections,
1803 * checking if something needs to be resent or not.
1804 * The return value is the time to the next timeout in milliseconds,
1805 * or maybe a negative value if the timeout is infinite.
1807 struct timeval utcp_timeout(struct utcp *utcp) {
1809 gettimeofday(&now, NULL);
1810 struct timeval next = {now.tv_sec + 3600, now.tv_usec};
1812 for(int i = 0; i < utcp->nconnections; i++) {
1813 struct utcp_connection *c = utcp->connections[i];
1819 // delete connections that have been utcp_close()d.
1820 if(c->state == CLOSED) {
1822 debug("Reaping %p\n", c);
1830 if(timerisset(&c->conn_timeout) && timercmp(&c->conn_timeout, &now, <)) {
1835 c->recv(c, NULL, 0);
1838 if(c->poll && !c->reapable) {
1845 if(timerisset(&c->rtrx_timeout) && timercmp(&c->rtrx_timeout, &now, <)) {
1846 debug("retransmit()\n");
1851 if((c->state == ESTABLISHED || c->state == CLOSE_WAIT)) {
1852 uint32_t len = buffer_free(&c->sndbuf);
1857 } else if(c->state == CLOSED) {
1862 if(timerisset(&c->conn_timeout) && timercmp(&c->conn_timeout, &next, <)) {
1863 next = c->conn_timeout;
1866 if(timerisset(&c->rtrx_timeout) && timercmp(&c->rtrx_timeout, &next, <)) {
1867 next = c->rtrx_timeout;
1871 struct timeval diff;
1873 timersub(&next, &now, &diff);
1878 bool utcp_is_active(struct utcp *utcp) {
1883 for(int i = 0; i < utcp->nconnections; i++)
1884 if(utcp->connections[i]->state != CLOSED && utcp->connections[i]->state != TIME_WAIT) {
1891 struct utcp *utcp_init(utcp_accept_t accept, utcp_pre_accept_t pre_accept, utcp_send_t send, void *priv) {
1897 struct utcp *utcp = calloc(1, sizeof(*utcp));
1903 utcp->accept = accept;
1904 utcp->pre_accept = pre_accept;
1907 utcp_set_mtu(utcp, DEFAULT_MTU);
1908 utcp->timeout = DEFAULT_USER_TIMEOUT; // sec
1909 utcp->rto = START_RTO; // usec
1914 void utcp_exit(struct utcp *utcp) {
1919 for(int i = 0; i < utcp->nconnections; i++) {
1920 struct utcp_connection *c = utcp->connections[i];
1924 c->recv(c, NULL, 0);
1927 if(c->poll && !c->reapable) {
1932 buffer_exit(&c->rcvbuf);
1933 buffer_exit(&c->sndbuf);
1937 free(utcp->connections);
1941 uint16_t utcp_get_mtu(struct utcp *utcp) {
1942 return utcp ? utcp->mtu : 0;
1945 void utcp_set_mtu(struct utcp *utcp, uint16_t mtu) {
1950 if(mtu <= sizeof(struct hdr)) {
1954 if(mtu > utcp->mtu) {
1955 char *new = realloc(utcp->pkt, mtu + sizeof(struct hdr));
1967 void utcp_reset_timers(struct utcp *utcp) {
1972 struct timeval now, then;
1974 gettimeofday(&now, NULL);
1978 then.tv_sec += utcp->timeout;
1980 for(int i = 0; i < utcp->nconnections; i++) {
1981 struct utcp_connection *c = utcp->connections[i];
1987 if(timerisset(&c->rtrx_timeout)) {
1988 c->rtrx_timeout = now;
1991 if(timerisset(&c->conn_timeout)) {
1992 c->conn_timeout = then;
1995 c->rtt_start.tv_sec = 0;
1998 if(utcp->rto > START_RTO) {
1999 utcp->rto = START_RTO;
2003 int utcp_get_user_timeout(struct utcp *u) {
2004 return u ? u->timeout : 0;
2007 void utcp_set_user_timeout(struct utcp *u, int timeout) {
2009 u->timeout = timeout;
2013 size_t utcp_get_sndbuf(struct utcp_connection *c) {
2014 return c ? c->sndbuf.maxsize : 0;
2017 size_t utcp_get_sndbuf_free(struct utcp_connection *c) {
2027 return buffer_free(&c->sndbuf);
2034 void utcp_set_sndbuf(struct utcp_connection *c, size_t size) {
2039 c->sndbuf.maxsize = size;
2041 if(c->sndbuf.maxsize != size) {
2042 c->sndbuf.maxsize = -1;
2046 size_t utcp_get_rcvbuf(struct utcp_connection *c) {
2047 return c ? c->rcvbuf.maxsize : 0;
2050 size_t utcp_get_rcvbuf_free(struct utcp_connection *c) {
2051 if(c && (c->state == ESTABLISHED || c->state == CLOSE_WAIT)) {
2052 return buffer_free(&c->rcvbuf);
2058 void utcp_set_rcvbuf(struct utcp_connection *c, size_t size) {
2063 c->rcvbuf.maxsize = size;
2065 if(c->rcvbuf.maxsize != size) {
2066 c->rcvbuf.maxsize = -1;
2070 size_t utcp_get_sendq(struct utcp_connection *c) {
2071 return c->sndbuf.used;
2074 size_t utcp_get_recvq(struct utcp_connection *c) {
2075 return c->rcvbuf.used;
2078 bool utcp_get_nodelay(struct utcp_connection *c) {
2079 return c ? c->nodelay : false;
2082 void utcp_set_nodelay(struct utcp_connection *c, bool nodelay) {
2084 c->nodelay = nodelay;
2088 bool utcp_get_keepalive(struct utcp_connection *c) {
2089 return c ? c->keepalive : false;
2092 void utcp_set_keepalive(struct utcp_connection *c, bool keepalive) {
2094 c->keepalive = keepalive;
2098 size_t utcp_get_outq(struct utcp_connection *c) {
2099 return c ? seqdiff(c->snd.nxt, c->snd.una) : 0;
2102 void utcp_set_recv_cb(struct utcp_connection *c, utcp_recv_t recv) {
2108 void utcp_set_poll_cb(struct utcp_connection *c, utcp_poll_t poll) {
2114 void utcp_set_accept_cb(struct utcp *utcp, utcp_accept_t accept, utcp_pre_accept_t pre_accept) {
2116 utcp->accept = accept;
2117 utcp->pre_accept = pre_accept;
2121 void utcp_expect_data(struct utcp_connection *c, bool expect) {
2122 if(!c || c->reapable) {
2126 if(!(c->state == ESTABLISHED || c->state == FIN_WAIT_1 || c->state == FIN_WAIT_2)) {
2131 // If we expect data, start the connection timer.
2132 if(!timerisset(&c->conn_timeout)) {
2133 gettimeofday(&c->conn_timeout, NULL);
2134 c->conn_timeout.tv_sec += c->utcp->timeout;
2137 // If we want to cancel expecting data, only clear the timer when there is no unACKed data.
2138 if(c->snd.una == c->snd.last) {
2139 timerclear(&c->conn_timeout);
2144 void utcp_offline(struct utcp *utcp, bool offline) {
2146 gettimeofday(&now, NULL);
2148 for(int i = 0; i < utcp->nconnections; i++) {
2149 struct utcp_connection *c = utcp->connections[i];
2155 utcp_expect_data(c, offline);
2158 if(timerisset(&c->rtrx_timeout)) {
2159 c->rtrx_timeout = now;
2162 utcp->connections[i]->rtt_start.tv_sec = 0;
2166 if(!offline && utcp->rto > START_RTO) {
2167 utcp->rto = START_RTO;