2 utcp.c -- Userspace TCP
3 Copyright (C) 2014-2017 Guus Sliepen <guus@tinc-vpn.org>
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
15 You should have received a copy of the GNU General Public License along
16 with this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
32 #include "utcp_priv.h"
47 #if defined(CLOCK_MONOTONIC_RAW) && defined(__x86_64__)
48 #define UTCP_CLOCK CLOCK_MONOTONIC_RAW
50 #define UTCP_CLOCK CLOCK_MONOTONIC
54 static void timespec_sub(const struct timespec *a, const struct timespec *b, struct timespec *r) {
55 r->tv_sec = a->tv_sec - b->tv_sec;
56 r->tv_nsec = a->tv_nsec - b->tv_nsec;
59 r->tv_sec--, r->tv_nsec += NSEC_PER_SEC;
/* Return the difference (a - b) in microseconds.
 * The fragment computed the nanosecond term from tv_sec instead of tv_nsec;
 * the 64-bit cast also prevents overflow when time_t is 32 bits wide. */
static int32_t timespec_diff_usec(const struct timespec *a, const struct timespec *b) {
	int64_t diff = (int64_t)(a->tv_sec - b->tv_sec) * 1000000000 + a->tv_nsec - b->tv_nsec;
	return diff / 1000;
}
/* Return true iff timestamp a is strictly earlier than b. */
static bool timespec_lt(const struct timespec *a, const struct timespec *b) {
	if(a->tv_sec == b->tv_sec) {
		// Seconds equal: the nanosecond field decides.
		return a->tv_nsec < b->tv_nsec;
	}

	return a->tv_sec < b->tv_sec;
}
76 static void timespec_clear(struct timespec *a) {
80 static bool timespec_isset(const struct timespec *a) {
84 static long CLOCK_GRANULARITY; // usec
86 static inline size_t min(size_t a, size_t b) {
90 static inline size_t max(size_t a, size_t b) {
97 #ifndef UTCP_DEBUG_DATALEN
98 #define UTCP_DEBUG_DATALEN 20
101 static void debug(struct utcp_connection *c, const char *format, ...) {
106 clock_gettime(CLOCK_REALTIME, &tv);
107 len = snprintf(buf, sizeof(buf), "%ld.%06lu %u:%u ", (long)tv.tv_sec, tv.tv_nsec / 1000, c ? c->src : 0, c ? c->dst : 0);
109 va_start(ap, format);
110 len += vsnprintf(buf + len, sizeof(buf) - len, format, ap);
113 if(len > 0 && (size_t)len < sizeof(buf)) {
114 fwrite(buf, len, 1, stderr);
/*
 * Debug helper: dump a packet's header fields plus a hex preview of at most
 * UTCP_DEBUG_DATALEN payload bytes. Compiled only in debug builds; the
 * print_packet macro below replaces it with a no-op otherwise.
 * NOTE(review): this extract is missing lines (hdr/datalen/p declarations and
 * closing braces) relative to the upstream source.
 */
118 static void print_packet(struct utcp_connection *c, const char *dir, const void *pkt, size_t len) {
121 if(len < sizeof(hdr)) {
122 debug(c, "%s: short packet (%lu bytes)\n", dir, (unsigned long)len);
126 memcpy(&hdr, pkt, sizeof(hdr));
130 if(len > sizeof(hdr)) {
131 datalen = min(len - sizeof(hdr), UTCP_DEBUG_DATALEN);
137 const uint8_t *data = (uint8_t *)pkt + sizeof(hdr);
// VLA sized for two hex digits per byte plus the NUL terminator.
138 char str[datalen * 2 + 1];
141 for(uint32_t i = 0; i < datalen; i++) {
142 *p++ = "0123456789ABCDEF"[data[i] >> 4];
143 *p++ = "0123456789ABCDEF"[data[i] & 15];
148 debug(c, "%s: len %lu src %u dst %u seq %u ack %u wnd %u aux %x ctl %s%s%s%s data %s\n",
149 dir, (unsigned long)len, hdr.src, hdr.dst, hdr.seq, hdr.ack, hdr.wnd, hdr.aux,
150 hdr.ctl & SYN ? "SYN" : "",
151 hdr.ctl & RST ? "RST" : "",
152 hdr.ctl & FIN ? "FIN" : "",
153 hdr.ctl & ACK ? "ACK" : "",
/* Log the congestion window and slow-start threshold; an all-ones ssthresh
 * (the "unset" sentinel from allocate_connection) is printed as 0. */
158 static void debug_cwnd(struct utcp_connection *c) {
159 debug(c, "snd.cwnd %u snd.ssthresh %u\n", c->snd.cwnd, ~c->snd.ssthresh ? c->snd.ssthresh : 0);
162 #define debug(...) do {} while(0)
163 #define print_packet(...) do {} while(0)
164 #define debug_cwnd(...) do {} while(0)
/* Transition a connection to a new TCP state and log it.
 * Reaching ESTABLISHED disarms the connection (user) timeout. */
167 static void set_state(struct utcp_connection *c, enum state state) {
170 if(state == ESTABLISHED) {
171 timespec_clear(&c->conn_timeout);
174 debug(c, "state %s\n", strstate[state]);
/* Should a FIN flag be attached to the segment ending at seq?
 * Only when seq is the final sequence number we intend to send (snd.last);
 * the (missing-from-extract) remainder presumably also checks the state. */
177 static bool fin_wanted(struct utcp_connection *c, uint32_t seq) {
178 if(seq != c->snd.last) {
193 static bool is_reliable(struct utcp_connection *c) {
194 return c->flags & UTCP_RELIABLE;
197 static int32_t seqdiff(uint32_t a, uint32_t b) {
202 static bool buffer_wraps(struct buffer *buf) {
203 return buf->size - buf->offset < buf->used;
/* Grow (or shrink) the circular buffer's allocation to newsize.
 * When the stored data wraps, the tail part is shifted to the end of the new
 * allocation so the logical byte order is preserved.
 * NOTE(review): the extract is missing the realloc failure check and the
 * assignments of newdata/newsize into buf. */
206 static bool buffer_resize(struct buffer *buf, uint32_t newsize) {
207 char *newdata = realloc(buf->data, newsize);
215 if(buffer_wraps(buf)) {
216 // Shift the right part of the buffer until it hits the end of the new buffer.
220 // [345.........|........012]
221 uint32_t tailsize = buf->size - buf->offset;
222 uint32_t newoffset = newsize - tailsize;
223 memmove(buf->data + newoffset, buf->data + buf->offset, tailsize);
224 buf->offset = newoffset;
231 // Store data into the buffer
/* Write len bytes at logical position `offset` (relative to the start of the
 * stored data), growing the allocation geometrically up to maxsize and
 * truncating the write if it would exceed maxsize. Handles the circular
 * wrap-around of both the offset and the data chunk itself. */
232 static ssize_t buffer_put_at(struct buffer *buf, size_t offset, const void *data, size_t len) {
233 debug(NULL, "buffer_put_at %lu %lu %lu\n", (unsigned long)buf->used, (unsigned long)offset, (unsigned long)len);
235 // Ensure we don't store more than maxsize bytes in total
236 size_t required = offset + len;
238 if(required > buf->maxsize) {
239 if(offset >= buf->maxsize) {
243 len = buf->maxsize - offset;
244 required = buf->maxsize;
247 // Check if we need to resize the buffer
248 if(required > buf->size) {
249 size_t newsize = buf->size;
257 } while(newsize < required);
259 if(newsize > buf->maxsize) {
260 newsize = buf->maxsize;
263 if(!buffer_resize(buf, newsize)) {
268 uint32_t realoffset = buf->offset + offset;
270 if(buf->size - buf->offset < offset) {
271 // The offset wrapped
272 realoffset -= buf->size;
275 if(buf->size - realoffset < len) {
276 // The new chunk of data must be wrapped
277 memcpy(buf->data + realoffset, data, buf->size - realoffset);
278 memcpy(buf->data, (char *)data + buf->size - realoffset, len - (buf->size - realoffset));
280 memcpy(buf->data + realoffset, data, len);
// Writing past the current end extends the amount of stored data.
283 if(required > buf->used) {
284 buf->used = required;
290 static ssize_t buffer_put(struct buffer *buf, const void *data, size_t len) {
291 return buffer_put_at(buf, buf->used, data, len);
294 // Copy data from the buffer without removing it.
/* Copy up to len bytes starting at logical position `offset` into data,
 * clamping to the amount actually stored, and handling the circular
 * wrap-around of both the read position and the copied range. */
295 static ssize_t buffer_copy(struct buffer *buf, void *data, size_t offset, size_t len) {
296 // Ensure we don't copy more than is actually stored in the buffer
297 if(offset >= buf->used) {
301 if(buf->used - offset < len) {
302 len = buf->used - offset;
305 uint32_t realoffset = buf->offset + offset;
307 if(buf->size - buf->offset < offset) {
308 // The offset wrapped
309 realoffset -= buf->size;
312 if(buf->size - realoffset < len) {
313 // The data is wrapped
314 memcpy(data, buf->data + realoffset, buf->size - realoffset);
315 memcpy((char *)data + buf->size - realoffset, buf->data, len - (buf->size - realoffset));
317 memcpy(data, buf->data + realoffset, len);
323 // Discard data from the buffer.
/* Drop up to len bytes from the front of the stored data by advancing the
 * read offset (modulo the circular buffer size).
 * NOTE(review): extract is missing the len clamp, the offset/used updates and
 * the return — the visible `buf->offset -= buf->size` is the wrap
 * pre-adjustment for the subsequent `buf->offset += len`. */
324 static ssize_t buffer_discard(struct buffer *buf, size_t len) {
325 if(buf->used < len) {
329 if(buf->size - buf->offset < len) {
330 buf->offset -= buf->size;
339 static bool buffer_set_size(struct buffer *buf, uint32_t minsize, uint32_t maxsize) {
340 if(maxsize < minsize) {
344 buf->maxsize = maxsize;
346 return buf->size >= minsize || buffer_resize(buf, minsize);
349 static void buffer_exit(struct buffer *buf) {
351 memset(buf, 0, sizeof(*buf));
354 static uint32_t buffer_free(const struct buffer *buf) {
355 return buf->maxsize - buf->used;
358 // Connections are stored in a sorted list.
359 // This gives O(log(N)) lookup time, O(N log(N)) insertion time and O(N) deletion time.
361 static int compare(const void *va, const void *vb) {
364 const struct utcp_connection *a = *(struct utcp_connection **)va;
365 const struct utcp_connection *b = *(struct utcp_connection **)vb;
368 assert(a->src && b->src);
370 int c = (int)a->src - (int)b->src;
376 c = (int)a->dst - (int)b->dst;
/* Look up the connection with the given (src, dst) port pair via binary
 * search over the sorted connection array; NULL if not found.
 * NOTE(review): extract is missing the key initialisers and keyp setup. */
380 static struct utcp_connection *find_connection(const struct utcp *utcp, uint16_t src, uint16_t dst) {
381 if(!utcp->nconnections) {
385 struct utcp_connection key = {
389 struct utcp_connection **match = bsearch(&keyp, utcp->connections, utcp->nconnections, sizeof(*utcp->connections), compare);
390 return match ? *match : NULL;
/* Remove a connection from the sorted array (closing the gap with memmove)
 * and release its send/receive buffers.
 * NOTE(review): extract is missing the bsearch-result check and the final
 * free(c) relative to upstream. */
393 static void free_connection(struct utcp_connection *c) {
394 struct utcp *utcp = c->utcp;
395 struct utcp_connection **cp = bsearch(&c, utcp->connections, utcp->nconnections, sizeof(*utcp->connections), compare);
399 int i = cp - utcp->connections;
400 memmove(cp, cp + 1, (utcp->nconnections - i - 1) * sizeof(*cp));
401 utcp->nconnections--;
403 buffer_exit(&c->rcvbuf);
404 buffer_exit(&c->sndbuf);
/* Create a new connection object for (src, dst).
 * src == 0 picks a random free port with the high bit set; the connection
 * array is grown geometrically, buffers are sized to their defaults, initial
 * sequence numbers and the RFC 6928-style initial cwnd are set, and the new
 * entry is inserted into the sorted list (qsort after append). */
408 static struct utcp_connection *allocate_connection(struct utcp *utcp, uint16_t src, uint16_t dst) {
409 // Check whether this combination of src and dst is free
412 if(find_connection(utcp, src, dst)) {
416 } else { // If src == 0, generate a random port number with the high bit set
417 if(utcp->nconnections >= 32767) {
422 src = rand() | 0x8000;
424 while(find_connection(utcp, src, dst)) {
429 // Allocate memory for the new connection
431 if(utcp->nconnections >= utcp->nallocated) {
432 if(!utcp->nallocated) {
433 utcp->nallocated = 4;
435 utcp->nallocated *= 2;
438 struct utcp_connection **new_array = realloc(utcp->connections, utcp->nallocated * sizeof(*utcp->connections));
444 utcp->connections = new_array;
447 struct utcp_connection *c = calloc(1, sizeof(*c));
453 if(!buffer_set_size(&c->sndbuf, DEFAULT_SNDBUFSIZE, DEFAULT_MAXSNDBUFSIZE)) {
458 if(!buffer_set_size(&c->rcvbuf, DEFAULT_RCVBUFSIZE, DEFAULT_MAXRCVBUFSIZE)) {
459 buffer_exit(&c->sndbuf);
464 // Fill in the details
473 c->snd.una = c->snd.iss;
474 c->snd.nxt = c->snd.iss + 1;
475 c->snd.last = c->snd.nxt;
// Initial window: 2-4 segments depending on MSS (cf. RFC 6928 IW guidance).
476 c->snd.cwnd = (utcp->mss > 2190 ? 2 : utcp->mss > 1095 ? 3 : 4) * utcp->mss;
// All-ones sentinel: slow-start threshold not yet established.
477 c->snd.ssthresh = ~0;
481 // Add it to the sorted list of connections
483 utcp->connections[utcp->nconnections++] = c;
484 qsort(utcp->connections, utcp->nconnections, sizeof(*utcp->connections), compare);
489 static inline uint32_t absdiff(uint32_t a, uint32_t b) {
497 // Update RTT variables. See RFC 6298.
/* Feed one RTT sample (usec) into the smoothed estimator:
 * rttvar = 3/4 rttvar + 1/4 |srtt - rtt|, srtt = 7/8 srtt + 1/8 rtt,
 * rto = srtt + max(4 * rttvar, clock granularity), clamped to MAX_RTO.
 * NOTE(review): extract is missing the rtt==0 early return and the
 * first-sample (srtt = rtt) branch. */
498 static void update_rtt(struct utcp_connection *c, uint32_t rtt) {
500 debug(c, "invalid rtt\n");
504 struct utcp *utcp = c->utcp;
508 utcp->rttvar = rtt / 2;
510 utcp->rttvar = (utcp->rttvar * 3 + absdiff(utcp->srtt, rtt)) / 4;
511 utcp->srtt = (utcp->srtt * 7 + rtt) / 8;
514 utcp->rto = utcp->srtt + max(4 * utcp->rttvar, CLOCK_GRANULARITY);
516 if(utcp->rto > MAX_RTO) {
520 debug(c, "rtt %u srtt %u rttvar %u rto %u\n", rtt, utcp->srtt, utcp->rttvar, utcp->rto);
523 static void start_retransmit_timer(struct utcp_connection *c) {
524 clock_gettime(UTCP_CLOCK, &c->rtrx_timeout);
526 uint32_t rto = c->utcp->rto;
528 while(rto > USEC_PER_SEC) {
529 c->rtrx_timeout.tv_sec++;
533 c->rtrx_timeout.tv_nsec += c->utcp->rto * 1000;
535 if(c->rtrx_timeout.tv_nsec >= NSEC_PER_SEC) {
536 c->rtrx_timeout.tv_nsec -= NSEC_PER_SEC;
537 c->rtrx_timeout.tv_sec++;
540 debug(c, "rtrx_timeout %ld.%06lu\n", c->rtrx_timeout.tv_sec, c->rtrx_timeout.tv_nsec);
543 static void stop_retransmit_timer(struct utcp_connection *c) {
544 timespec_clear(&c->rtrx_timeout);
545 debug(c, "rtrx_timeout cleared\n");
/* Actively open a connection to port dst with the given reliability/order
 * flags: allocate the connection, send a SYN carrying the aux "init" header
 * (0x0101) with the flags, enter SYN_SENT, and arm both the connection
 * timeout and the retransmission timer.
 * NOTE(review): extract is missing the allocation check, c->recv/c->priv
 * assignments, the pkt declaration and the final return. */
548 struct utcp_connection *utcp_connect_ex(struct utcp *utcp, uint16_t dst, utcp_recv_t recv, void *priv, uint32_t flags) {
549 struct utcp_connection *c = allocate_connection(utcp, 0, dst);
555 assert((flags & ~0x1f) == 0);
566 pkt.hdr.src = c->src;
567 pkt.hdr.dst = c->dst;
568 pkt.hdr.seq = c->snd.iss;
570 pkt.hdr.wnd = c->rcvbuf.maxsize;
572 pkt.hdr.aux = 0x0101;
576 pkt.init[3] = flags & 0x7;
578 set_state(c, SYN_SENT);
580 print_packet(c, "send", &pkt, sizeof(pkt));
581 utcp->send(utcp, &pkt, sizeof(pkt));
583 clock_gettime(UTCP_CLOCK, &c->conn_timeout);
584 c->conn_timeout.tv_sec += utcp->timeout;
586 start_retransmit_timer(c);
591 struct utcp_connection *utcp_connect(struct utcp *utcp, uint16_t dst, utcp_recv_t recv, void *priv) {
592 return utcp_connect_ex(utcp, dst, recv, priv, UTCP_TCP);
595 void utcp_accept(struct utcp_connection *c, utcp_recv_t recv, void *priv) {
596 if(c->reapable || c->state != SYN_RECEIVED) {
597 debug(c, "accept() called on invalid connection in state %s\n", c, strstate[c->state]);
601 debug(c, "accepted %p %p\n", c, recv, priv);
604 set_state(c, ESTABLISHED);
/* Transmit pending data (and/or a bare ACK).
 * left = unsent bytes; cwndleft = room in min(cwnd, peer window) beyond the
 * bytes already in flight. Data is sent in MSS-sized segments copied from the
 * send buffer; a FIN flag is attached to the final segment when wanted, and
 * an RTT measurement is started if none is running. When sendatleastone is
 * true an empty ACK is sent even if there is no data.
 * NOTE(review): extract is missing the clamping branches, the segment loop
 * header and several closing braces. */
607 static void ack(struct utcp_connection *c, bool sendatleastone) {
608 int32_t left = seqdiff(c->snd.last, c->snd.nxt);
609 int32_t cwndleft = min(c->snd.cwnd, c->snd.wnd) - seqdiff(c->snd.nxt, c->snd.una);
615 } else if(cwndleft < left) {
618 if(!sendatleastone || cwndleft > c->utcp->mss) {
// Send whole MSS-sized segments only while more data remains.
619 left -= left % c->utcp->mss;
623 debug(c, "cwndleft %d left %d\n", cwndleft, left);
625 if(!left && !sendatleastone) {
632 } *pkt = c->utcp->pkt;
634 pkt->hdr.src = c->src;
635 pkt->hdr.dst = c->dst;
636 pkt->hdr.ack = c->rcv.nxt;
637 pkt->hdr.wnd = c->rcvbuf.maxsize;
642 uint32_t seglen = left > c->utcp->mss ? c->utcp->mss : left;
643 pkt->hdr.seq = c->snd.nxt;
645 buffer_copy(&c->sndbuf, pkt->data, seqdiff(c->snd.nxt, c->snd.una), seglen);
647 c->snd.nxt += seglen;
650 if(seglen && fin_wanted(c, c->snd.nxt)) {
655 if(!c->rtt_start.tv_sec) {
656 // Start RTT measurement
657 clock_gettime(UTCP_CLOCK, &c->rtt_start);
658 c->rtt_seq = pkt->hdr.seq + seglen;
659 debug(c, "starting RTT measurement, expecting ack %u\n", c->rtt_seq);
662 print_packet(c, "send", pkt, sizeof(pkt->hdr) + seglen);
663 c->utcp->send(c->utcp, pkt, sizeof(pkt->hdr) + seglen);
/* Queue application data for transmission.
 * Validates the connection state, optionally enforces all-or-nothing
 * buffering (UTCP_NO_PARTIAL), appends the data to the send buffer, and —
 * once the handshake is done — kicks off transmission via ack(). Unreliable
 * connections drop buffered data immediately after sending; reliable ones
 * arm the retransmission and connection timers if not already running.
 * NOTE(review): extract is missing the state checks around the early debug
 * calls, snd.last accounting and the return statements. */
667 ssize_t utcp_send(struct utcp_connection *c, const void *data, size_t len) {
669 debug(c, "send() called on closed connection\n");
677 debug(c, "send() called on unconnected connection\n");
692 debug(c, "send() called on closed connection\n");
697 // Exit early if we have nothing to send.
708 // Check if we need to be able to buffer all data
710 if(c->flags & UTCP_NO_PARTIAL) {
711 if(len > buffer_free(&c->sndbuf)) {
712 if(len > c->sndbuf.maxsize) {
722 // Add data to send buffer.
724 if(is_reliable(c) || (c->state != SYN_SENT && c->state != SYN_RECEIVED)) {
725 len = buffer_put(&c->sndbuf, data, len);
741 // Don't send anything yet if the connection has not fully established yet
743 if(c->state == SYN_SENT || c->state == SYN_RECEIVED) {
749 if(!is_reliable(c)) {
// Unreliable mode: everything counts as delivered immediately.
750 c->snd.una = c->snd.nxt = c->snd.last;
751 buffer_discard(&c->sndbuf, c->sndbuf.used);
755 if(is_reliable(c) && !timespec_isset(&c->rtrx_timeout)) {
756 start_retransmit_timer(c);
759 if(is_reliable(c) && !timespec_isset(&c->conn_timeout)) {
760 clock_gettime(UTCP_CLOCK, &c->conn_timeout);
761 c->conn_timeout.tv_sec += c->utcp->timeout;
767 static void swap_ports(struct hdr *hdr) {
768 uint16_t tmp = hdr->src;
/* RFC 5681 fast retransmit: after three duplicate ACKs, resend one segment
 * (up to MSS bytes) of unacknowledged data starting at snd.una, without
 * waiting for the retransmission timer. Uses a temporary malloc'd packet.
 * NOTE(review): extract is missing the pkt struct declaration, the malloc
 * failure check and the trailing free(pkt). */
773 static void fast_retransmit(struct utcp_connection *c) {
774 if(c->state == CLOSED || c->snd.last == c->snd.una) {
775 debug(c, "fast_retransmit() called but nothing to retransmit!\n");
779 struct utcp *utcp = c->utcp;
786 pkt = malloc(c->utcp->mtu);
792 pkt->hdr.src = c->src;
793 pkt->hdr.dst = c->dst;
794 pkt->hdr.wnd = c->rcvbuf.maxsize;
803 // Send unacked data again.
804 pkt->hdr.seq = c->snd.una;
805 pkt->hdr.ack = c->rcv.nxt;
807 uint32_t len = min(seqdiff(c->snd.last, c->snd.una), utcp->mss);
809 if(fin_wanted(c, c->snd.una + len)) {
814 buffer_copy(&c->sndbuf, pkt->data, 0, len);
815 print_packet(c, "rtrx", pkt, sizeof(pkt->hdr) + len);
816 utcp->send(utcp, pkt, sizeof(pkt->hdr) + len);
/* Retransmission-timeout handler. Depending on state it resends the SYN
 * (SYN_SENT), the SYN|ACK (SYN_RECEIVED), or the oldest unacked data segment
 * — the latter after entering RFC 5681 slow start (ssthresh = max(flight/2,
 * 2*MSS), cwnd = 1 MSS). Afterwards the RTO is backed off (clamped to
 * MAX_RTO), the timer restarted, any RTT measurement invalidated and fast
 * recovery cancelled.
 * NOTE(review): extract is missing the switch statement itself, the pkt
 * struct declaration, FIN/ctl assignments and the RTO doubling line. */
826 static void retransmit(struct utcp_connection *c) {
827 if(c->state == CLOSED || c->snd.last == c->snd.una) {
828 debug(c, "retransmit() called but nothing to retransmit!\n");
829 stop_retransmit_timer(c);
833 struct utcp *utcp = c->utcp;
838 } *pkt = c->utcp->pkt;
840 pkt->hdr.src = c->src;
841 pkt->hdr.dst = c->dst;
842 pkt->hdr.wnd = c->rcvbuf.maxsize;
847 // Send our SYN again
848 pkt->hdr.seq = c->snd.iss;
851 pkt->hdr.aux = 0x0101;
855 pkt->data[3] = c->flags & 0x7;
856 print_packet(c, "rtrx", pkt, sizeof(pkt->hdr) + 4);
857 utcp->send(utcp, pkt, sizeof(pkt->hdr) + 4);
862 pkt->hdr.seq = c->snd.nxt;
863 pkt->hdr.ack = c->rcv.nxt;
864 pkt->hdr.ctl = SYN | ACK;
865 print_packet(c, "rtrx", pkt, sizeof(pkt->hdr));
866 utcp->send(utcp, pkt, sizeof(pkt->hdr));
874 // Send unacked data again.
875 pkt->hdr.seq = c->snd.una;
876 pkt->hdr.ack = c->rcv.nxt;
878 uint32_t len = min(seqdiff(c->snd.last, c->snd.una), utcp->mss);
880 if(fin_wanted(c, c->snd.una + len)) {
885 // RFC 5681 slow start after timeout
886 uint32_t flightsize = seqdiff(c->snd.nxt, c->snd.una);
887 c->snd.ssthresh = max(flightsize / 2, utcp->mss * 2); // eq. 4
888 c->snd.cwnd = utcp->mss;
891 buffer_copy(&c->sndbuf, pkt->data, 0, len);
892 print_packet(c, "rtrx", pkt, sizeof(pkt->hdr) + len);
893 utcp->send(utcp, pkt, sizeof(pkt->hdr) + len);
895 c->snd.nxt = c->snd.una + len;
902 // We shouldn't need to retransmit anything in this state.
906 stop_retransmit_timer(c);
910 start_retransmit_timer(c);
913 if(utcp->rto > MAX_RTO) {
917 c->rtt_start.tv_sec = 0; // invalidate RTT timer
918 c->dupack = 0; // cancel any ongoing fast recovery
924 /* Update receive buffer and SACK entries after consuming data.
928 * |.....0000..1111111111.....22222......3333|
931 * 0..3 represent the SACK entries. The ^ indicates up to which point we want
932 * to remove data from the receive buffer. The idea is to subtract "len"
933 * from the offset of all the SACK entries, and then remove/cut down entries
934 * that are shifted to before the start of the receive buffer.
936 * There are three cases:
937 * - the SACK entry is after ^, in that case just change the offset.
938 * - the SACK entry starts before and ends after ^, so we have to
939 * change both its offset and size.
940 * - the SACK entry is completely before ^, in that case delete it.
/* Consume len bytes from the front of the receive buffer and shift every
 * SACK entry left by the same amount: entries wholly after the consumed
 * region just move, an entry straddling the boundary is trimmed, and entries
 * wholly before it are deleted (remaining entries compacted down). */
942 static void sack_consume(struct utcp_connection *c, size_t len) {
943 debug(c, "sack_consume %lu\n", (unsigned long)len);
945 if(len > c->rcvbuf.used) {
946 debug(c, "all SACK entries consumed\n");
951 buffer_discard(&c->rcvbuf, len);
953 for(int i = 0; i < NSACKS && c->sacks[i].len;) {
954 if(len < c->sacks[i].offset) {
955 c->sacks[i].offset -= len;
957 } else if(len < c->sacks[i].offset + c->sacks[i].len) {
// Entry straddles the consumed region: keep only the tail.
958 c->sacks[i].len -= len - c->sacks[i].offset;
959 c->sacks[i].offset = 0;
963 memmove(&c->sacks[i], &c->sacks[i + 1], (NSACKS - 1 - i) * sizeof(c->sacks)[i]);
964 c->sacks[NSACKS - 1].len = 0;
972 for(int i = 0; i < NSACKS && c->sacks[i].len; i++) {
973 debug(c, "SACK[%d] offset %u len %u\n", i, c->sacks[i].offset, c->sacks[i].len);
/* Store an out-of-order segment at its offset in the receive buffer and
 * record/merge the covered range in the sorted SACK entry list: either a new
 * entry, an insertion before an existing one (if room), or a merge with the
 * start or end of an overlapping/adjacent entry. */
977 static void handle_out_of_order(struct utcp_connection *c, uint32_t offset, const void *data, size_t len) {
978 debug(c, "out of order packet, offset %u\n", offset);
979 // Packet loss or reordering occurred. Store the data in the buffer.
980 ssize_t rxd = buffer_put_at(&c->rcvbuf, offset, data, len);
// Buffer full or write truncated: drop (the peer will retransmit).
982 if(rxd < 0 || (size_t)rxd < len) {
986 // Make note of where we put it.
987 for(int i = 0; i < NSACKS; i++) {
988 if(!c->sacks[i].len) { // nothing to merge, add new entry
989 debug(c, "new SACK entry %d\n", i);
990 c->sacks[i].offset = offset;
991 c->sacks[i].len = rxd;
993 } else if(offset < c->sacks[i].offset) {
994 if(offset + rxd < c->sacks[i].offset) { // insert before
995 if(!c->sacks[NSACKS - 1].len) { // only if room left
996 debug(c, "insert SACK entry at %d\n", i);
997 memmove(&c->sacks[i + 1], &c->sacks[i], (NSACKS - i - 1) * sizeof(c->sacks)[i]);
998 c->sacks[i].offset = offset;
999 c->sacks[i].len = rxd;
1001 debug(c, "SACK entries full, dropping packet\n");
1006 debug(c, "merge with start of SACK entry at %d\n", i);
1007 c->sacks[i].offset = offset;
1010 } else if(offset <= c->sacks[i].offset + c->sacks[i].len) {
1011 if(offset + rxd > c->sacks[i].offset + c->sacks[i].len) { // merge
1012 debug(c, "merge with end of SACK entry at %d\n", i);
1013 c->sacks[i].len = offset + rxd - c->sacks[i].offset;
1014 // TODO: handle potential merge with next entry
1021 for(int i = 0; i < NSACKS && c->sacks[i].len; i++) {
1022 debug(c, "SACK[%d] offset %u len %u\n", i, c->sacks[i].offset, c->sacks[i].len);
/* Deliver in-order data to the application. If the new data reaches the
 * first SACKed range, splice it into the receive buffer and deliver the
 * combined contiguous run instead, then drop the consumed bytes and adjust
 * the SACK list via sack_consume(). */
1026 static void handle_in_order(struct utcp_connection *c, const void *data, size_t len) {
1027 // Check if we can process out-of-order data now.
1028 if(c->sacks[0].len && len >= c->sacks[0].offset) { // TODO: handle overlap with second SACK
1029 debug(c, "incoming packet len %lu connected with SACK at %u\n", (unsigned long)len, c->sacks[0].offset);
1030 buffer_put_at(&c->rcvbuf, 0, data, len); // TODO: handle return value
1031 len = max(len, c->sacks[0].offset + c->sacks[0].len);
1032 data = c->rcvbuf.data;
1036 ssize_t rxd = c->recv(c, data, len);
1038 if(rxd < 0 || (size_t)rxd != len) {
1039 // TODO: handle the application not accepting all data.
1044 if(c->rcvbuf.used) {
1045 sack_consume(c, len);
/* Dispatch a received data segment. Unreliable connections deliver directly
 * and advance rcv.nxt. Reliable ones compute the offset from rcv.nxt, drop
 * segments beyond the receive window, and route to the out-of-order or
 * in-order path. NOTE(review): extract is missing the branch structure and
 * the rcv.nxt update for the reliable path. */
1052 static void handle_incoming_data(struct utcp_connection *c, uint32_t seq, const void *data, size_t len) {
1053 if(!is_reliable(c)) {
1054 c->recv(c, data, len);
1055 c->rcv.nxt = seq + len;
1059 uint32_t offset = seqdiff(seq, c->rcv.nxt);
1061 if(offset + len > c->rcvbuf.maxsize) {
1066 handle_out_of_order(c, offset, data, len);
1068 handle_in_order(c, data, len);
/*
 * utcp_recv() -- entry point for one received datagram.
 * Parses and validates the header, matches it to an existing connection (or
 * creates one for an incoming SYN when listening), then follows the classic
 * TCP input steps: window acceptability, ACK processing (RTT sampling, cwnd
 * growth, duplicate-ACK fast recovery), RST handling, SYN/FIN processing and
 * data delivery, finally scheduling any outgoing ACK/data via ack().
 * NOTE(review): this extract is missing many interior lines of the upstream
 * function; comments only annotate what is visible here.
 */
1073 ssize_t utcp_recv(struct utcp *utcp, const void *data, size_t len) {
1074 const uint8_t *ptr = data;
1090 // Drop packets smaller than the header
1094 if(len < sizeof(hdr)) {
1095 print_packet(NULL, "recv", data, len);
1100 // Make a copy from the potentially unaligned data to a struct hdr
1102 memcpy(&hdr, ptr, sizeof(hdr));
1104 // Try to match the packet to an existing connection
1106 struct utcp_connection *c = find_connection(utcp, hdr.dst, hdr.src);
1107 print_packet(c, "recv", data, len);
1109 // Process the header
1114 // Drop packets with an unknown CTL flag
1116 if(hdr.ctl & ~(SYN | ACK | RST | FIN)) {
1117 print_packet(NULL, "recv", data, len);
1122 // Check for auxiliary headers
1124 const uint8_t *init = NULL;
1126 uint16_t aux = hdr.aux;
1129 size_t auxlen = 4 * (aux >> 8) & 0xf;
1130 uint8_t auxtype = aux & 0xff;
// The "init" aux header (type 1) is only valid on a SYN and is 4 bytes.
1139 if(!(hdr.ctl & SYN) || auxlen != 4) {
1155 if(!(aux & 0x800)) {
1164 memcpy(&aux, ptr, 2);
1169 bool has_data = len || (hdr.ctl & (SYN | FIN));
1171 // Is it for a new connection?
1174 // Ignore RST packets
1180 // Is it a SYN packet and are we LISTENing?
1182 if(hdr.ctl & SYN && !(hdr.ctl & ACK) && utcp->accept) {
1183 // If we don't want to accept it, send a RST back
1184 if((utcp->pre_accept && !utcp->pre_accept(utcp, hdr.dst))) {
1189 // Try to allocate memory, otherwise send a RST back
1190 c = allocate_connection(utcp, hdr.dst, hdr.src);
1197 // Parse auxiliary information
1204 c->flags = init[3] & 0x7;
1206 c->flags = UTCP_TCP;
1210 // Return SYN+ACK, go to SYN_RECEIVED state
1211 c->snd.wnd = hdr.wnd;
1212 c->rcv.irs = hdr.seq;
1213 c->rcv.nxt = c->rcv.irs + 1;
1214 set_state(c, SYN_RECEIVED);
1221 pkt.hdr.src = c->src;
1222 pkt.hdr.dst = c->dst;
1223 pkt.hdr.ack = c->rcv.irs + 1;
1224 pkt.hdr.seq = c->snd.iss;
1225 pkt.hdr.wnd = c->rcvbuf.maxsize;
1226 pkt.hdr.ctl = SYN | ACK;
1229 pkt.hdr.aux = 0x0101;
1233 pkt.data[3] = c->flags & 0x7;
1234 print_packet(c, "send", &pkt, sizeof(hdr) + 4);
1235 utcp->send(utcp, &pkt, sizeof(hdr) + 4);
1238 print_packet(c, "send", &pkt, sizeof(hdr));
1239 utcp->send(utcp, &pkt, sizeof(hdr));
1242 // No, we don't want your packets, send a RST back
1250 debug(c, "state %s\n", strstate[c->state]);
1252 // In case this is for a CLOSED connection, ignore the packet.
1253 // TODO: make it so incoming packets can never match a CLOSED connection.
1255 if(c->state == CLOSED) {
1256 debug(c, "got packet for closed connection\n");
1260 // It is for an existing connection.
1262 // 1. Drop invalid packets.
1264 // 1a. Drop packets that should not happen in our current state.
1285 // 1b. Discard data that is not in our receive window.
1287 if(is_reliable(c)) {
1290 if(c->state == SYN_SENT) {
1292 } else if(len == 0) {
1293 acceptable = seqdiff(hdr.seq, c->rcv.nxt) >= 0;
1295 int32_t rcv_offset = seqdiff(hdr.seq, c->rcv.nxt);
1297 // cut already accepted front overlapping
1298 if(rcv_offset < 0) {
1299 acceptable = len > (size_t) - rcv_offset;
1304 hdr.seq -= rcv_offset;
1307 acceptable = seqdiff(hdr.seq, c->rcv.nxt) >= 0 && seqdiff(hdr.seq, c->rcv.nxt) + len <= c->rcvbuf.maxsize;
1312 debug(c, "packet not acceptable, %u <= %u + %lu < %u\n", c->rcv.nxt, hdr.seq, (unsigned long)len, c->rcv.nxt + c->rcvbuf.maxsize);
1314 // Ignore unacceptable RST packets.
1319 // Otherwise, continue processing.
1324 int32_t rcv_offset = seqdiff(hdr.seq, c->rcv.nxt);
1327 debug(c, "packet out of order, offset %u bytes", rcv_offset);
1330 if(rcv_offset >= 0) {
1331 c->rcv.nxt = hdr.seq + len;
1337 c->snd.wnd = hdr.wnd; // TODO: move below
1339 // 1c. Drop packets with an invalid ACK.
1340 // ackno should not roll back, and it should also not be bigger than what we ever could have sent
1341 // (= snd.una + c->sndbuf.used).
1343 if(!is_reliable(c)) {
1344 if(hdr.ack != c->snd.last && c->state >= ESTABLISHED) {
1345 hdr.ack = c->snd.una;
1349 if(hdr.ctl & ACK && (seqdiff(hdr.ack, c->snd.last) > 0 || seqdiff(hdr.ack, c->snd.una) < 0)) {
1350 debug(c, "packet ack seqno out of range, %u <= %u < %u\n", c->snd.una, hdr.ack, c->snd.una + c->sndbuf.used);
1352 // Ignore unacceptable RST packets.
1360 // 2. Handle RST packets
1365 if(!(hdr.ctl & ACK)) {
1369 // The peer has refused our connection.
1370 set_state(c, CLOSED);
1371 errno = ECONNREFUSED;
1374 c->recv(c, NULL, 0);
1377 if(c->poll && !c->reapable) {
1388 // We haven't told the application about this connection yet. Silently delete.
1400 // The peer has aborted our connection.
1401 set_state(c, CLOSED);
1405 c->recv(c, NULL, 0);
1408 if(c->poll && !c->reapable) {
1421 // As far as the application is concerned, the connection has already been closed.
1422 // If it has called utcp_close() already, we can immediately free this connection.
1428 // Otherwise, immediately move to the CLOSED state.
1429 set_state(c, CLOSED);
1442 if(!(hdr.ctl & ACK)) {
1447 // 3. Advance snd.una
1449 advanced = seqdiff(hdr.ack, c->snd.una);
// Complete a pending RTT measurement if this ACK covers the measured segment.
1453 if(c->rtt_start.tv_sec) {
1454 if(c->rtt_seq == hdr.ack) {
1455 struct timespec now;
1456 clock_gettime(UTCP_CLOCK, &now);
1457 int32_t diff = timespec_diff_usec(&now, &c->rtt_start);
1458 update_rtt(c, diff);
1459 c->rtt_start.tv_sec = 0;
1460 } else if(c->rtt_seq < hdr.ack) {
1461 debug(c, "cancelling RTT measurement: %u < %u\n", c->rtt_seq, hdr.ack);
1462 c->rtt_start.tv_sec = 0;
1466 int32_t data_acked = advanced;
1474 // TODO: handle FIN as well.
1479 assert(data_acked >= 0);
1482 int32_t bufused = seqdiff(c->snd.last, c->snd.una);
1483 assert(data_acked <= bufused);
1487 buffer_discard(&c->sndbuf, data_acked);
1491 // Also advance snd.nxt if possible
1492 if(seqdiff(c->snd.nxt, hdr.ack) < 0) {
1493 c->snd.nxt = hdr.ack;
1496 c->snd.una = hdr.ack;
1499 if(c->dupack >= 3) {
1500 debug(c, "fast recovery ended\n");
1501 c->snd.cwnd = c->snd.ssthresh;
1507 // Increase the congestion window according to RFC 5681
1508 if(c->snd.cwnd < c->snd.ssthresh) {
1509 c->snd.cwnd += min(advanced, utcp->mss); // eq. 2
1511 c->snd.cwnd += max(1, (utcp->mss * utcp->mss) / c->snd.cwnd); // eq. 3
1514 if(c->snd.cwnd > c->sndbuf.maxsize) {
1515 c->snd.cwnd = c->sndbuf.maxsize;
1520 // Check if we have sent a FIN that is now ACKed.
1523 if(c->snd.una == c->snd.last) {
1524 set_state(c, FIN_WAIT_2);
1530 if(c->snd.una == c->snd.last) {
1531 clock_gettime(UTCP_CLOCK, &c->conn_timeout);
1532 c->conn_timeout.tv_sec += utcp->timeout;
1533 set_state(c, TIME_WAIT);
// Duplicate ACK while data is outstanding: count towards fast recovery.
1542 if(!len && is_reliable(c) && c->snd.una != c->snd.last) {
1544 debug(c, "duplicate ACK %d\n", c->dupack);
1546 if(c->dupack == 3) {
1547 // RFC 5681 fast recovery
// NOTE(review): extra argument c->dupack has no matching format conversion.
1548 debug(c, "fast recovery started\n", c->dupack);
1549 uint32_t flightsize = seqdiff(c->snd.nxt, c->snd.una);
1550 c->snd.ssthresh = max(flightsize / 2, utcp->mss * 2); // eq. 4
1551 c->snd.cwnd = min(c->snd.ssthresh + 3 * utcp->mss, c->sndbuf.maxsize);
1553 if(c->snd.cwnd > c->sndbuf.maxsize) {
1554 c->snd.cwnd = c->sndbuf.maxsize;
1560 } else if(c->dupack > 3) {
1561 c->snd.cwnd += utcp->mss;
1563 if(c->snd.cwnd > c->sndbuf.maxsize) {
1564 c->snd.cwnd = c->sndbuf.maxsize;
1570 // We got an ACK which indicates the other side did get one of our packets.
1571 // Reset the retransmission timer to avoid going to slow start,
1572 // but don't touch the connection timeout.
1573 start_retransmit_timer(c);
1580 if(c->snd.una == c->snd.last) {
1581 stop_retransmit_timer(c);
1582 timespec_clear(&c->conn_timeout);
1583 } else if(is_reliable(c)) {
1584 start_retransmit_timer(c);
1585 clock_gettime(UTCP_CLOCK, &c->conn_timeout);
1586 c->conn_timeout.tv_sec += utcp->timeout;
1591 // 5. Process SYN stuff
1597 // This is a SYNACK. It should always have ACKed the SYN.
1602 c->rcv.irs = hdr.seq;
1603 c->rcv.nxt = hdr.seq;
1607 set_state(c, FIN_WAIT_1);
1609 set_state(c, ESTABLISHED);
1612 // TODO: notify application of this somehow.
1616 // This is a retransmit of a SYN, send back the SYNACK.
1626 // Ehm, no. We should never receive a second SYN.
1636 // SYN counts as one sequence number
1640 // 6. Process new data
1642 if(c->state == SYN_RECEIVED) {
1643 // This is the ACK after the SYNACK. It should always have ACKed the SYNACK.
1648 // Are we still LISTENing?
1650 utcp->accept(c, c->src);
1653 if(c->state != ESTABLISHED) {
1654 set_state(c, CLOSED);
1664 // This should never happen.
1679 // Ehm no, We should never receive more data after a FIN.
1689 handle_incoming_data(c, hdr.seq, ptr, len);
1692 // 7. Process FIN stuff
1694 if((hdr.ctl & FIN) && (!is_reliable(c) || hdr.seq + len == c->rcv.nxt)) {
1698 // This should never happen.
1705 set_state(c, CLOSE_WAIT);
1709 set_state(c, CLOSING);
1713 clock_gettime(UTCP_CLOCK, &c->conn_timeout);
1714 c->conn_timeout.tv_sec += utcp->timeout;
1715 set_state(c, TIME_WAIT);
1722 // Ehm, no. We should never receive a second FIN.
1732 // FIN counts as one sequence number
1736 // Inform the application that the peer closed its end of the connection.
1739 c->recv(c, NULL, 0);
1743 // Now we send something back if:
1744 // - we received data, so we have to send back an ACK
1745 // -> sendatleastone = true
1746 // - or we got an ack, so we should maybe send a bit more data
1747 // -> sendatleastone = false
1749 if(is_reliable(c) || hdr.ctl & SYN || hdr.ctl & FIN) {
// Unmatched packet: echo an RST|ACK back at the sender.
1764 hdr.ack = hdr.seq + len;
1766 hdr.ctl = RST | ACK;
1769 print_packet(c, "send", &hdr, sizeof(hdr));
1770 utcp->send(utcp, &hdr, sizeof(hdr));
/* Shut down one or both directions of a connection (UTCP_SHUT_RD/WR/RDWR).
 * Reads are "shut" by simply ignoring further incoming data; shutting writes
 * queues a FIN (advancing to FIN_WAIT_1 or CLOSING depending on state) and
 * arms the retransmission timer if needed.
 * NOTE(review): extract is missing validity checks, the state switch and the
 * return statements. */
1775 int utcp_shutdown(struct utcp_connection *c, int dir) {
1776 debug(c, "shutdown %d at %u\n", dir, c ? c->snd.last : 0);
1784 debug(c, "shutdown() called on closed connection\n");
1789 if(!(dir == UTCP_SHUT_RD || dir == UTCP_SHUT_WR || dir == UTCP_SHUT_RDWR)) {
1794 // TCP does not have a provision for stopping incoming packets.
1795 // The best we can do is to just ignore them.
1796 if(dir == UTCP_SHUT_RD || dir == UTCP_SHUT_RDWR) {
1800 // The rest of the code deals with shutting down writes.
1801 if(dir == UTCP_SHUT_RD) {
1805 // Only process shutting down writes once.
1823 set_state(c, FIN_WAIT_1);
1831 set_state(c, CLOSING);
1844 if(!timespec_isset(&c->rtrx_timeout)) {
1845 start_retransmit_timer(c);
/* Abort a connection: move it to CLOSED and, when the peer might still think
 * the connection exists, send a bare RST at snd.nxt.
 * NOTE(review): extract is missing the state switch, the hdr declaration and
 * the return statements. */
1851 static bool reset_connection(struct utcp_connection *c) {
1858 debug(c, "abort() called on closed connection\n");
1875 set_state(c, CLOSED);
1883 set_state(c, CLOSED);
1893 hdr.seq = c->snd.nxt;
1898 print_packet(c, "send", &hdr, sizeof(hdr));
1899 c->utcp->send(c->utcp, &hdr, sizeof(hdr));
1903 // Closes all the opened connections
/* Abort every live connection, then notify the application through the
 * callbacks that were registered *before* the reset (reset_connection clears
 * or invalidates them, hence the old_recv/old_poll copies). */
1904 void utcp_abort_all_connections(struct utcp *utcp) {
1910 for(int i = 0; i < utcp->nconnections; i++) {
1911 struct utcp_connection *c = utcp->connections[i];
1913 if(c->reapable || c->state == CLOSED) {
1917 utcp_recv_t old_recv = c->recv;
1918 utcp_poll_t old_poll = c->poll;
1920 reset_connection(c);
// recv(c, NULL, 0) is the EOF/teardown notification convention.
1924 old_recv(c, NULL, 0);
1927 if(old_poll && !c->reapable) {
/* Close both directions and mark the connection reapable; a shutdown failure
 * other than "not connected" is reported to the caller.
 * NOTE(review): extract is missing the callback clearing and return. */
1936 int utcp_close(struct utcp_connection *c) {
1937 if(utcp_shutdown(c, SHUT_RDWR) && errno != ENOTCONN) {
/* Hard-abort a connection (RST) and mark it reapable.
 * NOTE(review): extract is missing the reapable flag and return. */
1947 int utcp_abort(struct utcp_connection *c) {
1948 if(!reset_connection(c)) {
1957 * One call to this function will loop through all connections,
1958 * checking if something needs to be resent or not.
1959 * The return value is the time to the next timeout in milliseconds,
1960 * or maybe a negative value if the timeout is infinite.
/* Periodic driver: reap CLOSED connections, fire expired connection and
 * retransmission timers, invoke poll callbacks for writable connections, and
 * compute the delay until the earliest pending timeout (capped at one hour).
 * NOTE(review): the "milliseconds" wording above predates the timespec
 * return; the function returns a timespec difference. */
1962 struct timespec utcp_timeout(struct utcp *utcp) {
1963 struct timespec now;
1964 clock_gettime(UTCP_CLOCK, &now);
1965 struct timespec next = {now.tv_sec + 3600, now.tv_nsec};
1967 for(int i = 0; i < utcp->nconnections; i++) {
1968 struct utcp_connection *c = utcp->connections[i];
1974 // delete connections that have been utcp_close()d.
1975 if(c->state == CLOSED) {
1977 debug(c, "reaping\n");
1985 if(timespec_isset(&c->conn_timeout) && timespec_lt(&c->conn_timeout, &now)) {
1990 c->recv(c, NULL, 0);
1993 if(c->poll && !c->reapable) {
2000 if(timespec_isset(&c->rtrx_timeout) && timespec_lt(&c->rtrx_timeout, &now)) {
2001 debug(c, "retransmitting after timeout\n");
2006 if((c->state == ESTABLISHED || c->state == CLOSE_WAIT) && c->do_poll) {
2008 uint32_t len = buffer_free(&c->sndbuf);
2013 } else if(c->state == CLOSED) {
2018 if(timespec_isset(&c->conn_timeout) && timespec_lt(&c->conn_timeout, &next)) {
2019 next = c->conn_timeout;
2022 if(timespec_isset(&c->rtrx_timeout) && timespec_lt(&c->rtrx_timeout, &next)) {
2023 next = c->rtrx_timeout;
2027 struct timespec diff;
2029 timespec_sub(&next, &now, &diff);
// Returns true while any connection is in a state other than CLOSED or
// TIME_WAIT, i.e. while the stack still has active work to do.
2034 bool utcp_is_active(struct utcp *utcp) {
2039 for(int i = 0; i < utcp->nconnections; i++)
2040 if(utcp->connections[i]->state != CLOSED && utcp->connections[i]->state != TIME_WAIT) {
// Allocate and initialise a new utcp stack instance.
// send is the packet output callback; accept/pre_accept gate incoming
// connections; priv is an opaque user pointer (assignment elided here).
// Returns NULL on allocation failure (check elided from this excerpt).
2047 struct utcp *utcp_init(utcp_accept_t accept, utcp_pre_accept_t pre_accept, utcp_send_t send, void *priv) {
2053 struct utcp *utcp = calloc(1, sizeof(*utcp));
// Lazily measure the clock resolution once, shared by all instances.
// NOTE(review): clock_getres()'s return value is not checked.
2059 if(!CLOCK_GRANULARITY) {
2060 struct timespec res;
2061 clock_getres(UTCP_CLOCK, &res);
2062 CLOCK_GRANULARITY = res.tv_sec * USEC_PER_SEC + res.tv_nsec / 1000;
2065 utcp->accept = accept;
2066 utcp->pre_accept = pre_accept;
// Defaults: MTU/MSS, user timeout, and initial retransmission timeout.
2069 utcp_set_mtu(utcp, DEFAULT_MTU);
2070 utcp->timeout = DEFAULT_USER_TIMEOUT; // sec
2071 utcp->rto = START_RTO; // usec
// Tear down a utcp instance: notify applications of each remaining
// connection, release per-connection buffers, then the connection array.
2076 void utcp_exit(struct utcp *utcp) {
2081 for(int i = 0; i < utcp->nconnections; i++) {
2082 struct utcp_connection *c = utcp->connections[i];
// recv(c, NULL, 0) signals EOF/teardown to the application.
2086 c->recv(c, NULL, 0);
2089 if(c->poll && !c->reapable) {
2094 buffer_exit(&c->rcvbuf);
2095 buffer_exit(&c->sndbuf);
2099 free(utcp->connections);
// Returns the configured MTU, or 0 when utcp is NULL.
2103 uint16_t utcp_get_mtu(struct utcp *utcp) {
2104 return utcp ? utcp->mtu : 0;
// Returns the derived MSS (MTU minus header size), or 0 when utcp is NULL.
2107 uint16_t utcp_get_mss(struct utcp *utcp) {
2108 return utcp ? utcp->mss : 0;
// Set the path MTU; the MSS is derived by subtracting the utcp header size.
// MTUs too small to hold a header are rejected (the early return is elided
// from this excerpt).
2111 void utcp_set_mtu(struct utcp *utcp, uint16_t mtu) {
2116 if(mtu <= sizeof(struct hdr)) {
// Growing the MTU requires a larger scratch packet buffer; realloc into a
// temporary so utcp->pkt stays valid if reallocation fails.
2120 if(mtu > utcp->mtu) {
2121 char *new = realloc(utcp->pkt, mtu + sizeof(struct hdr));
2131 utcp->mss = mtu - sizeof(struct hdr);
// Restart all timers, e.g. after a long suspension: armed retransmit timers
// fire immediately on the next utcp_timeout() call, connection timeouts
// restart from now + utcp->timeout, RTT measurement is invalidated, and the
// RTO is reset to its initial value.
2134 void utcp_reset_timers(struct utcp *utcp) {
2139 struct timespec now, then;
2141 clock_gettime(UTCP_CLOCK, &now);
2145 then.tv_sec += utcp->timeout;
2147 for(int i = 0; i < utcp->nconnections; i++) {
2148 struct utcp_connection *c = utcp->connections[i];
// Only rewrite timers that are actually armed; cleared timers stay cleared.
2154 if(timespec_isset(&c->rtrx_timeout)) {
2155 c->rtrx_timeout = now;
2158 if(timespec_isset(&c->conn_timeout)) {
2159 c->conn_timeout = then;
// Drop any in-flight RTT sample; it would be skewed by the suspension.
2162 c->rtt_start.tv_sec = 0;
2165 if(utcp->rto > START_RTO) {
2166 utcp->rto = START_RTO;
// Returns the user timeout in seconds, or 0 when u is NULL.
2170 int utcp_get_user_timeout(struct utcp *u) {
2171 return u ? u->timeout : 0;
// Set the user timeout in seconds (presumably guarded by a NULL check on u
// — the guard line is elided from this excerpt; confirm against full source).
2174 void utcp_set_user_timeout(struct utcp *u, int timeout) {
2176 u->timeout = timeout;
// Returns the maximum send buffer size, or 0 when c is NULL.
2180 size_t utcp_get_sndbuf(struct utcp_connection *c) {
2181 return c ? c->sndbuf.maxsize : 0;
// Returns the free space in the send buffer. Guard clauses (NULL check,
// connection-state check) are elided from this excerpt; presumably 0 is
// returned when the connection cannot accept data — confirm in full source.
2184 size_t utcp_get_sndbuf_free(struct utcp_connection *c) {
2194 return buffer_free(&c->sndbuf);
// Set the maximum send buffer size.
2201 void utcp_set_sndbuf(struct utcp_connection *c, size_t size) {
2206 c->sndbuf.maxsize = size;
// If the assignment truncated (maxsize is narrower than size_t), clamp to
// the maximum representable value instead of a silently-wrong size.
2208 if(c->sndbuf.maxsize != size) {
2209 c->sndbuf.maxsize = -1;
// Request a poll callback if there is now free space to report.
2212 c->do_poll = buffer_free(&c->sndbuf);
// Returns the maximum receive buffer size, or 0 when c is NULL.
2215 size_t utcp_get_rcvbuf(struct utcp_connection *c) {
2216 return c ? c->rcvbuf.maxsize : 0;
// Returns the free space in the receive buffer, but only while data can
// still arrive (ESTABLISHED or CLOSE_WAIT); presumably 0 otherwise — the
// else branch is elided from this excerpt.
2219 size_t utcp_get_rcvbuf_free(struct utcp_connection *c) {
2220 if(c && (c->state == ESTABLISHED || c->state == CLOSE_WAIT)) {
2221 return buffer_free(&c->rcvbuf);
// Set the maximum receive buffer size.
2227 void utcp_set_rcvbuf(struct utcp_connection *c, size_t size) {
2232 c->rcvbuf.maxsize = size;
// Same truncation clamp as utcp_set_sndbuf(): if maxsize could not hold
// size, fall back to the maximum representable value.
2234 if(c->rcvbuf.maxsize != size) {
2235 c->rcvbuf.maxsize = -1;
// Returns the number of bytes queued in the send buffer.
// NOTE(review): unlike the sibling getters there is no NULL check on c
// here — confirm callers never pass NULL.
2239 size_t utcp_get_sendq(struct utcp_connection *c) {
2240 return c->sndbuf.used;
// Returns the number of bytes queued in the receive buffer.
// NOTE(review): no NULL check on c, unlike the sibling getters — confirm.
2243 size_t utcp_get_recvq(struct utcp_connection *c) {
2244 return c->rcvbuf.used;
// Returns the nodelay flag, or false when c is NULL.
2247 bool utcp_get_nodelay(struct utcp_connection *c) {
2248 return c ? c->nodelay : false;
// Set the nodelay flag (guard on c presumably elided from this excerpt).
2251 void utcp_set_nodelay(struct utcp_connection *c, bool nodelay) {
2253 c->nodelay = nodelay;
// Returns the keepalive flag, or false when c is NULL.
2257 bool utcp_get_keepalive(struct utcp_connection *c) {
2258 return c ? c->keepalive : false;
// Set the keepalive flag (guard on c presumably elided from this excerpt).
2261 void utcp_set_keepalive(struct utcp_connection *c, bool keepalive) {
2263 c->keepalive = keepalive;
// Returns the number of bytes sent but not yet ACKed (in flight), computed
// from the send-side sequence numbers; 0 when c is NULL.
2267 size_t utcp_get_outq(struct utcp_connection *c) {
2268 return c ? seqdiff(c->snd.nxt, c->snd.una) : 0;
// Replace the receive callback (body elided from this excerpt).
2271 void utcp_set_recv_cb(struct utcp_connection *c, utcp_recv_t recv) {
// Replace the poll callback, and request an immediate poll if the send
// buffer already has free space to report.
2277 void utcp_set_poll_cb(struct utcp_connection *c, utcp_poll_t poll) {
2280 c->do_poll = buffer_free(&c->sndbuf);
// Replace the accept and pre-accept callbacks for incoming connections
// (guard on utcp presumably elided from this excerpt).
2284 void utcp_set_accept_cb(struct utcp *utcp, utcp_accept_t accept, utcp_pre_accept_t pre_accept) {
2286 utcp->accept = accept;
2287 utcp->pre_accept = pre_accept;
// Tell utcp whether we currently expect data from the peer. While expecting,
// the connection timeout is armed so a dead peer is eventually detected;
// when expectation is cancelled, the timeout is cleared only once all sent
// data has been ACKed.
2291 void utcp_expect_data(struct utcp_connection *c, bool expect) {
2292 if(!c || c->reapable) {
// Only meaningful in states where data can still arrive.
2296 if(!(c->state == ESTABLISHED || c->state == FIN_WAIT_1 || c->state == FIN_WAIT_2)) {
2301 // If we expect data, start the connection timer.
2302 if(!timespec_isset(&c->conn_timeout)) {
2303 clock_gettime(UTCP_CLOCK, &c->conn_timeout);
2304 c->conn_timeout.tv_sec += c->utcp->timeout;
2307 // If we want to cancel expecting data, only clear the timer when there is no unACKed data.
2308 if(c->snd.una == c->snd.last) {
2309 timespec_clear(&c->conn_timeout);
// Notify utcp that the network went offline (or came back online).
// Going offline arms the expect-data machinery on each connection; coming
// back online makes pending retransmits fire immediately and resets the RTO.
2314 void utcp_offline(struct utcp *utcp, bool offline) {
2315 struct timespec now;
2316 clock_gettime(UTCP_CLOCK, &now);
2318 for(int i = 0; i < utcp->nconnections; i++) {
2319 struct utcp_connection *c = utcp->connections[i];
2325 utcp_expect_data(c, offline);
// Armed retransmit timers fire on the very next utcp_timeout() call.
2328 if(timespec_isset(&c->rtrx_timeout)) {
2329 c->rtrx_timeout = now;
// Drop any in-flight RTT sample; the offline period would skew it.
2332 utcp->connections[i]->rtt_start.tv_sec = 0;
2336 if(!offline && utcp->rto > START_RTO) {
2337 utcp->rto = START_RTO;
// Override the lazily-measured clock granularity, in microseconds.
2341 void utcp_set_clock_granularity(long granularity) {
2342 CLOCK_GRANULARITY = granularity;