2 utcp.c -- Userspace TCP
3 Copyright (C) 2014-2017 Guus Sliepen <guus@tinc-vpn.org>
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
15 You should have received a copy of the GNU General Public License along
16 with this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
23 #include "utcp_priv.h"
38 #if defined(CLOCK_MONOTONIC_RAW) && defined(__x86_64__)
39 #define UTCP_CLOCK CLOCK_MONOTONIC_RAW
41 #define UTCP_CLOCK CLOCK_MONOTONIC
45 static void timespec_sub(const struct timespec *a, const struct timespec *b, struct timespec *r) {
46 r->tv_sec = a->tv_sec - b->tv_sec;
47 r->tv_nsec = a->tv_nsec - b->tv_nsec;
50 r->tv_sec--, r->tv_nsec += NSEC_PER_SEC;
// Difference a - b expressed in microseconds.
// The result is truncated to 32 bits; callers only use this for short
// RTT-sized intervals, where it fits comfortably.
static int32_t timespec_diff_usec(const struct timespec *a, const struct timespec *b) {
	int64_t usec = (int64_t)(a->tv_sec - b->tv_sec) * 1000000;
	usec += (a->tv_nsec - b->tv_nsec) / 1000;
	return (int32_t)usec;
}
// Strict "a < b" ordering on timespecs: seconds first, nanoseconds break ties.
static bool timespec_lt(const struct timespec *a, const struct timespec *b) {
	if(a->tv_sec != b->tv_sec) {
		return a->tv_sec < b->tv_sec;
	}

	return a->tv_nsec < b->tv_nsec;
}
// Reset a timespec so that timespec_isset() reports it as unset.
66 static void timespec_clear(struct timespec *a) {
// True when the timespec holds a live timer value (i.e. was not cleared).
71 static bool timespec_isset(const struct timespec *a) {
// Resolution of UTCP_CLOCK in microseconds; initialized lazily from
// clock_getres() in utcp_init() and overridable via utcp_set_clock_granularity().
75 static long CLOCK_GRANULARITY; // usec
// Smaller of two sizes.
static inline size_t min(size_t a, size_t b) {
	return b < a ? b : a;
}
// Larger of two sizes.
static inline size_t max(size_t a, size_t b) {
	return b > a ? b : a;
}
// Maximum number of payload bytes that print_packet() will hex-dump.
88 #ifndef UTCP_DEBUG_DATALEN
89 #define UTCP_DEBUG_DATALEN 20
// Print a timestamped, connection-tagged (src:dst) debug message to stderr.
// Only emits complete messages that fit the local buffer.
92 static void debug(struct utcp_connection *c, const char *format, ...) {
97 clock_gettime(CLOCK_REALTIME, &tv);
98 len = snprintf(buf, sizeof(buf), "%ld.%06lu %u:%u ", (long)tv.tv_sec, tv.tv_nsec / 1000, c ? c->src : 0, c ? c->dst : 0);
100 va_start(ap, format);
101 len += vsnprintf(buf + len, sizeof(buf) - len, format, ap);
// NOTE(review): confirm va_end(ap) is present in the elided lines.
// Drop the message entirely on snprintf error or truncation.
104 if(len > 0 && (size_t)len < sizeof(buf)) {
105 fwrite(buf, len, 1, stderr);
// Dump a packet (direction tag, header fields, control flags and up to
// UTCP_DEBUG_DATALEN bytes of payload as hex) through debug().
109 static void print_packet(struct utcp_connection *c, const char *dir, const void *pkt, size_t len) {
112 if(len < sizeof(hdr)) {
113 debug(c, "%s: short packet (%lu bytes)\n", dir, (unsigned long)len);
// Copy the header out of the possibly unaligned packet buffer.
117 memcpy(&hdr, pkt, sizeof(hdr));
121 if(len > sizeof(hdr)) {
122 datalen = min(len - sizeof(hdr), UTCP_DEBUG_DATALEN);
128 const uint8_t *data = (uint8_t *)pkt + sizeof(hdr);
// VLA: two hex digits per byte plus NUL terminator.
129 char str[datalen * 2 + 1];
132 for(uint32_t i = 0; i < datalen; i++) {
133 *p++ = "0123456789ABCDEF"[data[i] >> 4];
134 *p++ = "0123456789ABCDEF"[data[i] & 15];
139 debug(c, "%s: len %lu src %u dst %u seq %u ack %u wnd %u aux %x ctl %s%s%s%s%s data %s\n",
140 dir, (unsigned long)len, hdr.src, hdr.dst, hdr.seq, hdr.ack, hdr.wnd, hdr.aux,
141 hdr.ctl & SYN ? "SYN" : "",
142 hdr.ctl & RST ? "RST" : "",
143 hdr.ctl & FIN ? "FIN" : "",
144 hdr.ctl & ACK ? "ACK" : "",
145 hdr.ctl & MF ? "MF" : "",
// Log congestion-control state. ~ssthresh is zero exactly when ssthresh is
// UINT32_MAX ("infinite"), so that case is printed as 0.
150 static void debug_cwnd(struct utcp_connection *c) {
151 debug(c, "snd.cwnd %u snd.ssthresh %u\n", c->snd.cwnd, ~c->snd.ssthresh ? c->snd.ssthresh : 0);
// Non-debug builds: compile the debug helpers away entirely.
154 #define debug(...) do {} while(0)
155 #define print_packet(...) do {} while(0)
156 #define debug_cwnd(...) do {} while(0)
// Move the connection to a new state. Reaching ESTABLISHED cancels the
// connection (user) timeout that was armed during the handshake.
159 static void set_state(struct utcp_connection *c, enum state state) {
162 if(state == ESTABLISHED) {
163 timespec_clear(&c->conn_timeout);
166 debug(c, "state %s\n", strstate[state]);
// Whether a segment ending at seq should carry the FIN flag.
// Only possible for the very last sequence number we will send;
// the elided lines presumably also check the connection state -- confirm.
169 static bool fin_wanted(struct utcp_connection *c, uint32_t seq) {
170 if(seq != c->snd.last) {
// Signed distance between two 32-bit sequence numbers, correct across
// wraparound (relies on two's-complement narrowing).
static int32_t seqdiff(uint32_t a, uint32_t b) {
	return (int32_t)(a - b);
}
190 static bool buffer_wraps(struct buffer *buf) {
191 return buf->size - buf->offset < buf->used;
// Resize the internal backing store to newsize bytes, preserving contents.
// When the live region wraps, the tail is shifted to the end of the new
// allocation so the wrap point stays consistent. Must not be called on an
// externally supplied buffer.
194 static bool buffer_resize(struct buffer *buf, uint32_t newsize) {
195 assert(!buf->external);
205 char *newdata = realloc(buf->data, newsize);
// NOTE(review): confirm the elided lines NULL-check newdata before it is used.
213 if(buffer_wraps(buf)) {
214 // Shift the right part of the buffer until it hits the end of the new buffer.
218 // [345.........|........012]
219 uint32_t tailsize = buf->size - buf->offset;
220 uint32_t newoffset = newsize - tailsize;
221 memmove(buf->data + newoffset, buf->data + buf->offset, tailsize);
222 buf->offset = newoffset;
229 // Store data into the buffer
// Writes len bytes at logical position `offset` in the circular buffer,
// clamping so total usage never exceeds maxsize and growing the backing
// store on demand. Returns the number of bytes actually stored (in the
// elided tail), or an error if nothing fits.
230 static ssize_t buffer_put_at(struct buffer *buf, size_t offset, const void *data, size_t len) {
231 debug(NULL, "buffer_put_at %lu %lu %lu\n", (unsigned long)buf->used, (unsigned long)offset, (unsigned long)len);
233 // Ensure we don't store more than maxsize bytes in total
// NOTE(review): offset + len could overflow size_t for hostile inputs;
// confirm callers bound these values.
234 size_t required = offset + len;
236 if(required > buf->maxsize) {
237 if(offset >= buf->maxsize) {
241 len = buf->maxsize - offset;
242 required = buf->maxsize;
245 // Check if we need to resize the buffer
246 if(required > buf->size) {
// Grow geometrically (doubling loop elided) until required fits.
247 size_t newsize = buf->size;
255 } while(newsize < required);
257 if(newsize > buf->maxsize) {
258 newsize = buf->maxsize;
261 if(!buffer_resize(buf, newsize)) {
// Translate the logical offset into a physical position, handling wrap.
266 uint32_t realoffset = buf->offset + offset;
268 if(buf->size - buf->offset <= offset) {
269 // The offset wrapped
270 realoffset -= buf->size;
273 if(buf->size - realoffset < len) {
274 // The new chunk of data must be wrapped
275 memcpy(buf->data + realoffset, data, buf->size - realoffset);
276 memcpy(buf->data, (char *)data + buf->size - realoffset, len - (buf->size - realoffset));
278 memcpy(buf->data + realoffset, data, len);
// Only extend the used count when the write reaches beyond current data.
281 if(required > buf->used) {
282 buf->used = required;
288 static ssize_t buffer_put(struct buffer *buf, const void *data, size_t len) {
289 return buffer_put_at(buf, buf->used, data, len);
292 // Copy data from the buffer without removing it.
// Reads up to len bytes starting at logical position `offset`, handling the
// circular wrap; the clamped length is returned in the elided tail.
293 static ssize_t buffer_copy(struct buffer *buf, void *data, size_t offset, size_t len) {
294 // Ensure we don't copy more than is actually stored in the buffer
295 if(offset >= buf->used) {
299 if(buf->used - offset < len) {
300 len = buf->used - offset;
// Translate logical offset to physical position in the circular store.
303 uint32_t realoffset = buf->offset + offset;
305 if(buf->size - buf->offset <= offset) {
306 // The offset wrapped
307 realoffset -= buf->size;
310 if(buf->size - realoffset < len) {
311 // The data is wrapped
312 memcpy(data, buf->data + realoffset, buf->size - realoffset);
313 memcpy((char *)data + buf->size - realoffset, buf->data, len - (buf->size - realoffset));
315 memcpy(data, buf->data + realoffset, len);
321 // Discard data from the buffer.
// Drops up to len bytes from the front, advancing offset around the ring.
322 static ssize_t buffer_discard(struct buffer *buf, size_t len) {
323 if(buf->used < len) {
// NOTE(review): the offset arithmetic below depends on elided lines
// (presumably an offset += len before/around this); as excerpted the
// subtraction alone would underflow -- confirm against the full source.
327 if(buf->size - buf->offset <= len) {
328 buf->offset -= buf->size;
// When everything was discarded, the offset can be reset (elided branch).
331 if(buf->used == len) {
// Drop all buffered data, keeping the allocation for reuse.
342 static void buffer_clear(struct buffer *buf) {
// Configure the buffer's growth limit and guarantee a minimum capacity.
// Returns false when the limits are inconsistent or the resize fails.
347 static bool buffer_set_size(struct buffer *buf, uint32_t minsize, uint32_t maxsize) {
348 if(maxsize < minsize) {
352 buf->maxsize = maxsize;
// Only reallocate when the current capacity is below the requested minimum.
354 return buf->size >= minsize || buffer_resize(buf, minsize);
// Linearize the buffered bytes into a replacement backing store newdata,
// un-wrapping the circular layout in the process.
357 static void buffer_transfer(struct buffer *buf, char *newdata, size_t newsize) {
358 if(buffer_wraps(buf)) {
// Wrapped: copy the tail segment first, then the part at the start.
363 uint32_t tailsize = buf->size - buf->offset;
364 memcpy(newdata, buf->data + buf->offset, tailsize);
365 memcpy(newdata + tailsize, buf->data, buf->used - tailsize);
// Contiguous: a single copy suffices.
371 memcpy(newdata, buf->data + buf->offset, buf->used);
// Switch the buffer between internal (heap-managed) and external
// (caller-supplied) storage, migrating any live data across.
// data == NULL requests a transition back to internal storage.
378 static void set_buffer_storage(struct buffer *buf, char *data, size_t size) {
// The buffer bookkeeping is 32-bit; refuse anything larger.
379 if(size > UINT32_MAX) {
387 // Don't allow resizing an external buffer
391 if(size < buf->used) {
392 // Ignore requests for an external buffer if we are already using more than it can store
396 // Transition from internal to external buffer
397 buffer_transfer(buf, data, size);
// NOTE(review): the elided lines presumably free() the old internal
// allocation here -- confirm, otherwise it leaks.
400 buf->external = true;
401 } else if(buf->external) {
402 // Transition from external to internal buf
403 size_t minsize = buf->used <= DEFAULT_SNDBUFSIZE ? DEFAULT_SNDBUFSIZE : buf->used;
406 data = malloc(minsize);
409 // Cannot handle this
413 buffer_transfer(buf, data, minsize);
420 buf->external = false;
422 // Don't do anything if the buffer wraps
423 if(buffer_wraps(buf)) {
427 // Realloc internal storage
428 size_t minsize = max(DEFAULT_SNDBUFSIZE, buf->offset + buf->used);
431 data = realloc(buf->data, minsize);
// Release the buffer's resources and reset all bookkeeping to zero.
// NOTE(review): the elided lines presumably free() internal storage
// (but not external storage) before the memset -- confirm.
445 static void buffer_exit(struct buffer *buf) {
450 memset(buf, 0, sizeof(*buf));
453 static uint32_t buffer_free(const struct buffer *buf) {
454 return buf->maxsize > buf->used ? buf->maxsize - buf->used : 0;
457 // Connections are stored in a sorted list.
458 // This gives O(log(N)) lookup time, O(N log(N)) insertion time and O(N) deletion time.
// qsort()/bsearch() comparator over struct utcp_connection pointers,
// ordering by src port first, then dst port.
460 static int compare(const void *va, const void *vb) {
463 const struct utcp_connection *a = *(struct utcp_connection **)va;
464 const struct utcp_connection *b = *(struct utcp_connection **)vb;
// Widening to int makes the uint16_t subtraction safe (no wraparound).
468 int c = (int)a->src - (int)b->src;
474 c = (int)a->dst - (int)b->dst;
// Binary-search the sorted connection array for the (src, dst) pair.
// Returns NULL when no such connection exists.
478 static struct utcp_connection *find_connection(const struct utcp *utcp, uint16_t src, uint16_t dst) {
479 if(!utcp->nconnections) {
// Stack key mirroring the fields the comparator reads.
483 struct utcp_connection key = {
487 struct utcp_connection **match = bsearch(&keyp, utcp->connections, utcp->nconnections, sizeof(*utcp->connections), compare);
488 return match ? *match : NULL;
// Remove a connection from the sorted array (O(N) shift) and release its
// buffers; the struct itself is freed in the elided tail.
491 static void free_connection(struct utcp_connection *c) {
492 struct utcp *utcp = c->utcp;
493 struct utcp_connection **cp = bsearch(&c, utcp->connections, utcp->nconnections, sizeof(*utcp->connections), compare);
// Close the gap left by the removed entry.
497 int i = cp - utcp->connections;
498 memmove(cp, cp + 1, (utcp->nconnections - i - 1) * sizeof(*cp));
499 utcp->nconnections--;
501 buffer_exit(&c->rcvbuf);
502 buffer_exit(&c->sndbuf);
// Create a new connection object for (src, dst). A src of 0 means "pick a
// random local port with the high bit set". Returns NULL on conflicts,
// port exhaustion or allocation failure (error paths elided).
506 static struct utcp_connection *allocate_connection(struct utcp *utcp, uint16_t src, uint16_t dst) {
507 // Check whether this combination of src and dst is free
510 if(find_connection(utcp, src, dst)) {
514 } else { // If src == 0, generate a random port number with the high bit set
// Half the port space (high-bit ports) is reserved for this; bail out
// when it could be exhausted.
515 if(utcp->nconnections >= 32767) {
520 src = rand() | 0x8000;
// Keep re-rolling until the port is unused.
522 while(find_connection(utcp, src, dst)) {
527 // Allocate memory for the new connection
// Grow the pointer array geometrically (start at 4, then double).
529 if(utcp->nconnections >= utcp->nallocated) {
530 if(!utcp->nallocated) {
531 utcp->nallocated = 4;
533 utcp->nallocated *= 2;
536 struct utcp_connection **new_array = realloc(utcp->connections, utcp->nallocated * sizeof(*utcp->connections));
542 utcp->connections = new_array;
545 struct utcp_connection *c = calloc(1, sizeof(*c));
551 if(!buffer_set_size(&c->sndbuf, DEFAULT_SNDBUFSIZE, DEFAULT_MAXSNDBUFSIZE)) {
556 if(!buffer_set_size(&c->rcvbuf, DEFAULT_RCVBUFSIZE, DEFAULT_MAXRCVBUFSIZE)) {
557 buffer_exit(&c->sndbuf);
562 // Fill in the details
// Initial send state: una/nxt/last anchored at the ISS.
571 c->snd.una = c->snd.iss;
572 c->snd.nxt = c->snd.iss + 1;
573 c->snd.last = c->snd.nxt;
// Initial congestion window per RFC 5681, scaled by MSS bracket.
574 c->snd.cwnd = (utcp->mss > 2190 ? 2 : utcp->mss > 1095 ? 3 : 4) * utcp->mss;
// ~0 == UINT32_MAX means "no slow-start threshold yet".
575 c->snd.ssthresh = ~0;
582 // Add it to the sorted list of connections
584 utcp->connections[utcp->nconnections++] = c;
585 qsort(utcp->connections, utcp->nconnections, sizeof(*utcp->connections), compare);
// Absolute difference of two unsigned 32-bit values, without overflow.
static inline uint32_t absdiff(uint32_t a, uint32_t b) {
	if(a < b) {
		return b - a;
	}

	return a - b;
}
598 // Update RTT variables. See RFC 6298.
// rtt is the newly measured round-trip time in microseconds; srtt/rttvar
// are the smoothed estimate and variance, and rto is recomputed from them.
599 static void update_rtt(struct utcp_connection *c, uint32_t rtt) {
601 debug(c, "invalid rtt\n");
// RFC 6298 (2.3): rttvar = 3/4 rttvar + 1/4 |srtt - rtt|,
//                 srtt   = 7/8 srtt + 1/8 rtt.
609 c->rttvar = (c->rttvar * 3 + absdiff(c->srtt, rtt)) / 4;
610 c->srtt = (c->srtt * 7 + rtt) / 8;
// RTO = srtt + max(G, 4 * rttvar), clamped to MAX_RTO below.
613 c->rto = c->srtt + max(4 * c->rttvar, CLOCK_GRANULARITY);
615 if(c->rto > MAX_RTO) {
619 debug(c, "rtt %u srtt %u rttvar %u rto %u\n", rtt, c->srtt, c->rttvar, c->rto);
// Arm the retransmission timer at now + rto (rto is in microseconds).
622 static void start_retransmit_timer(struct utcp_connection *c) {
623 clock_gettime(UTCP_CLOCK, &c->rtrx_timeout);
625 uint32_t rto = c->rto;
// Peel off whole seconds first, then add the sub-second remainder.
627 while(rto > USEC_PER_SEC) {
628 c->rtrx_timeout.tv_sec++;
632 c->rtrx_timeout.tv_nsec += rto * 1000;
// Normalize tv_nsec back into [0, NSEC_PER_SEC).
634 if(c->rtrx_timeout.tv_nsec >= NSEC_PER_SEC) {
635 c->rtrx_timeout.tv_nsec -= NSEC_PER_SEC;
636 c->rtrx_timeout.tv_sec++;
639 debug(c, "rtrx_timeout %ld.%06lu\n", c->rtrx_timeout.tv_sec, c->rtrx_timeout.tv_nsec);
// Disarm the retransmission timer.
642 static void stop_retransmit_timer(struct utcp_connection *c) {
643 timespec_clear(&c->rtrx_timeout);
644 debug(c, "rtrx_timeout cleared\n");
// Open an outgoing connection to dst: allocate the connection object, send
// the initial SYN (with the 0x0101 init auxiliary header carrying the flag
// bits) and arm both the connection and retransmission timers.
647 struct utcp_connection *utcp_connect_ex(struct utcp *utcp, uint16_t dst, utcp_recv_t recv, void *priv, uint32_t flags) {
// src == 0 requests a random local port.
648 struct utcp_connection *c = allocate_connection(utcp, 0, dst);
// This variant of utcp only supports the unreliable (UDP-like) mode.
654 assert(flags == 0); // UDP only
665 pkt.hdr.src = c->src;
666 pkt.hdr.dst = c->dst;
667 pkt.hdr.seq = c->snd.iss;
668 pkt.hdr.wnd = c->rcvbuf.maxsize;
690 void utcp_accept(struct utcp_connection *c, utcp_recv_t recv, void *priv) {
691 if(c->reapable || c->state != SYN_RECEIVED) {
692 debug(c, "accept() called on invalid connection in state %s\n", c, strstate[c->state]);
696 debug(c, "accepted %p %p\n", c, recv, priv);
699 set_state(c, ESTABLISHED);
// Transmit pending data (and/or a bare ACK). Packetizes up to `left` bytes
// from the send buffer into MSS-sized segments; when sendatleastone is true
// an empty ACK is emitted even if there is no payload to send.
702 static void ack(struct utcp_connection *c, bool sendatleastone) {
// Bytes queued but not yet handed to the network.
703 int32_t left = seqdiff(c->snd.last, c->snd.nxt);
// This unreliable variant uses a fixed cap instead of the congestion window.
704 int32_t cwndleft = MAX_UNRELIABLE_SIZE;
710 } else if(cwndleft < left) {
// Round down to whole segments unless we must emit something now.
713 if(!sendatleastone || cwndleft > c->utcp->mss) {
714 left -= left % c->utcp->mss;
718 debug(c, "cwndleft %d left %d\n", cwndleft, left);
720 if(!left && !sendatleastone) {
// Reuse the preallocated packet scratch space owned by struct utcp.
727 } *pkt = c->utcp->pkt;
729 pkt->hdr.src = c->src;
730 pkt->hdr.dst = c->dst;
731 pkt->hdr.ack = c->rcv.nxt;
// Segmentation loop (header elided): one packet per iteration.
737 uint32_t seglen = left > c->utcp->mss ? c->utcp->mss : left;
738 pkt->hdr.seq = c->snd.nxt;
740 buffer_copy(&c->sndbuf, pkt->data, seqdiff(c->snd.nxt, c->snd.una), seglen);
742 c->snd.nxt += seglen;
// Piggy-back the FIN on the final segment when a shutdown is pending.
745 if(seglen && fin_wanted(c, c->snd.nxt)) {
750 if(!c->rtt_start.tv_sec) {
751 // Start RTT measurement
752 clock_gettime(UTCP_CLOCK, &c->rtt_start);
753 c->rtt_seq = pkt->hdr.seq + seglen;
754 debug(c, "starting RTT measurement, expecting ack %u\n", c->rtt_seq);
757 print_packet(c, "send", pkt, sizeof(pkt->hdr) + seglen);
758 c->utcp->send(c->utcp, pkt, sizeof(pkt->hdr) + seglen);
// NOTE(review): wnd is abused here as a running counter across fragments
// of one datagram in this unreliable variant -- confirm intent.
761 pkt->hdr.wnd += seglen;
// Queue len bytes of application data for transmission.
// Returns the number of bytes accepted, or -1 with errno set (error paths
// elided). Data queued before the handshake completes is held back.
766 ssize_t utcp_send(struct utcp_connection *c, const void *data, size_t len) {
768 debug(c, "send() called on closed connection\n");
776 debug(c, "send() called on unconnected connection\n");
791 debug(c, "send() called on closed connection\n");
796 // Exit early if we have nothing to send.
807 // Add data to send buffer.
809 if(c->state != SYN_SENT && c->state != SYN_RECEIVED) {
// A single unreliable message may not exceed MAX_UNRELIABLE_SIZE and must
// fit in the send buffer in full, or it is rejected outright.
810 if(len > MAX_UNRELIABLE_SIZE || buffer_put(&c->sndbuf, data, len) != (ssize_t)len) {
824 // Don't send anything yet if the connection has not fully established yet
826 if(c->state == SYN_SENT || c->state == SYN_RECEIVED) {
// Unreliable mode: drop whatever did not make it out immediately.
832 c->snd.una = c->snd.nxt = c->snd.last;
833 buffer_discard(&c->sndbuf, c->sndbuf.used);
838 static void swap_ports(struct hdr *hdr) {
839 uint16_t tmp = hdr->src;
// Retransmit after a timeout, with behavior depending on the state:
// resend the SYN (SYN_SENT), the SYNACK (SYN_RECEIVED), or the oldest
// unACKed data segment. Also backs off the RTO (elided) and cancels any
// in-flight RTT measurement and fast recovery.
844 static void retransmit(struct utcp_connection *c) {
845 if(c->state == CLOSED || c->snd.last == c->snd.una) {
846 debug(c, "retransmit() called but nothing to retransmit!\n");
847 stop_retransmit_timer(c);
851 struct utcp *utcp = c->utcp;
856 } *pkt = c->utcp->pkt;
858 pkt->hdr.src = c->src;
859 pkt->hdr.dst = c->dst;
860 pkt->hdr.wnd = c->rcvbuf.maxsize;
865 // Send our SYN again
866 pkt->hdr.seq = c->snd.iss;
// 0x0101: one 4-byte "init" auxiliary header follows.
869 pkt->hdr.aux = 0x0101;
873 pkt->data[3] = c->flags & 0x7;
874 print_packet(c, "rtrx", pkt, sizeof(pkt->hdr) + 4);
875 utcp->send(utcp, pkt, sizeof(pkt->hdr) + 4);
// SYN_RECEIVED: resend the SYNACK.
880 pkt->hdr.seq = c->snd.nxt;
881 pkt->hdr.ack = c->rcv.nxt;
882 pkt->hdr.ctl = SYN | ACK;
883 print_packet(c, "rtrx", pkt, sizeof(pkt->hdr));
884 utcp->send(utcp, pkt, sizeof(pkt->hdr));
894 // Send unacked data again.
895 pkt->hdr.seq = c->snd.una;
896 pkt->hdr.ack = c->rcv.nxt;
// Resend at most one MSS worth of the oldest unACKed data.
898 uint32_t len = min(seqdiff(c->snd.last, c->snd.una), utcp->mss);
900 if(fin_wanted(c, c->snd.una + len)) {
909 print_packet(c, "rtrx", pkt, sizeof(pkt->hdr) + len);
910 utcp->send(utcp, pkt, sizeof(pkt->hdr) + len);
917 // We shouldn't need to retransmit anything in this state.
921 stop_retransmit_timer(c);
925 start_retransmit_timer(c);
// Clamp the (presumably just doubled, elided) RTO to its maximum.
928 if(c->rto > MAX_RTO) {
932 c->rtt_start.tv_sec = 0; // invalidate RTT timer
933 c->dupack = 0; // cancel any ongoing fast recovery
// Deliver an unreliable datagram to the application.
// hdr->wnd doubles as a fragment offset in this mode; a zero offset with no
// MF (more fragments) flag is a complete datagram and takes the fast path.
939 static void handle_unreliable(struct utcp_connection *c, const struct hdr *hdr, const void *data, size_t len) {
940 // Fast path for unfragmented packets
941 if(!hdr->wnd && !(hdr->ctl & MF)) {
943 c->recv(c, data, len);
946 c->rcv.nxt = hdr->seq + len;
// Dispatch received payload; this variant only supports the unreliable path.
951 static void handle_incoming_data(struct utcp_connection *c, const struct hdr *hdr, const void *data, size_t len) {
952 handle_unreliable(c, hdr, data, len);
955 ssize_t utcp_recv(struct utcp *utcp, const void *data, size_t len) {
956 const uint8_t *ptr = data;
972 // Drop packets smaller than the header
976 if(len < sizeof(hdr)) {
977 print_packet(NULL, "recv", data, len);
982 // Make a copy from the potentially unaligned data to a struct hdr
984 memcpy(&hdr, ptr, sizeof(hdr));
986 // Try to match the packet to an existing connection
988 struct utcp_connection *c = find_connection(utcp, hdr.dst, hdr.src);
989 print_packet(c, "recv", data, len);
991 // Process the header
996 // Drop packets with an unknown CTL flag
998 if(hdr.ctl & ~(SYN | ACK | RST | FIN | MF)) {
999 print_packet(NULL, "recv", data, len);
1004 // Check for auxiliary headers
1006 const uint8_t *init = NULL;
1008 uint16_t aux = hdr.aux;
1011 size_t auxlen = 4 * (aux >> 8) & 0xf;
1012 uint8_t auxtype = aux & 0xff;
1021 if(!(hdr.ctl & SYN) || auxlen != 4) {
1037 if(!(aux & 0x800)) {
1046 memcpy(&aux, ptr, 2);
1051 bool has_data = len || (hdr.ctl & (SYN | FIN));
1053 // Is it for a new connection?
1056 // Ignore RST packets
1062 // Is it a SYN packet and are we LISTENing?
1064 if(hdr.ctl & SYN && !(hdr.ctl & ACK) && utcp->accept) {
1065 // If we don't want to accept it, send a RST back
1066 if((utcp->listen && !utcp->listen(utcp, hdr.dst))) {
1071 // Try to allocate memory, otherwise send a RST back
1072 c = allocate_connection(utcp, hdr.dst, hdr.src);
1079 // Parse auxilliary information
1086 c->flags = init[3] & 0x7;
1088 c->flags = UTCP_UDP;
1092 // Return SYN+ACK, go to SYN_RECEIVED state
1093 c->snd.wnd = hdr.wnd;
1094 c->rcv.irs = hdr.seq;
1095 c->rcv.nxt = c->rcv.irs + 1;
1096 set_state(c, SYN_RECEIVED);
1103 pkt.hdr.src = c->src;
1104 pkt.hdr.dst = c->dst;
1105 pkt.hdr.ack = c->rcv.irs + 1;
1106 pkt.hdr.seq = c->snd.iss;
1107 pkt.hdr.wnd = c->rcvbuf.maxsize;
1108 pkt.hdr.ctl = SYN | ACK;
1111 pkt.hdr.aux = 0x0101;
1115 pkt.data[3] = c->flags & 0x7;
1116 print_packet(c, "send", &pkt, sizeof(hdr) + 4);
1117 utcp->send(utcp, &pkt, sizeof(hdr) + 4);
1120 print_packet(c, "send", &pkt, sizeof(hdr));
1121 utcp->send(utcp, &pkt, sizeof(hdr));
1124 start_retransmit_timer(c);
1126 // No, we don't want your packets, send a RST back
1134 debug(c, "state %s\n", strstate[c->state]);
1136 // In case this is for a CLOSED connection, ignore the packet.
1137 // TODO: make it so incoming packets can never match a CLOSED connection.
1139 if(c->state == CLOSED) {
1140 debug(c, "got packet for closed connection\n");
1144 // It is for an existing connection.
1146 // 1. Drop invalid packets.
1148 // 1a. Drop packets that should not happen in our current state.
1170 int32_t rcv_offset = seqdiff(hdr.seq, c->rcv.nxt);
1173 debug(c, "packet out of order, offset %u bytes", rcv_offset);
1178 c->snd.wnd = hdr.wnd; // TODO: move below
1180 // 1c. Drop packets with an invalid ACK.
1181 // ackno should not roll back, and it should also not be bigger than what we ever could have sent
1182 // (= snd.una + c->sndbuf.used).
1184 if(hdr.ack != c->snd.last && c->state >= ESTABLISHED) {
1185 hdr.ack = c->snd.una;
1188 // 2. Handle RST packets
1193 if(!(hdr.ctl & ACK)) {
1197 // The peer has refused our connection.
1198 set_state(c, CLOSED);
1199 errno = ECONNREFUSED;
1202 c->recv(c, NULL, 0);
1212 // We haven't told the application about this connection yet. Silently delete.
1224 // The peer has aborted our connection.
1225 set_state(c, CLOSED);
1227 buffer_clear(&c->sndbuf);
1228 buffer_clear(&c->rcvbuf);
1231 c->recv(c, NULL, 0);
1243 // As far as the application is concerned, the connection has already been closed.
1244 // If it has called utcp_close() already, we can immediately free this connection.
1250 // Otherwise, immediately move to the CLOSED state.
1251 set_state(c, CLOSED);
1264 if(!(hdr.ctl & ACK)) {
1269 // 3. Advance snd.una
1271 if(seqdiff(hdr.ack, c->snd.last) > 0 || seqdiff(hdr.ack, c->snd.una) < 0) {
1272 debug(c, "packet ack seqno out of range, %u <= %u < %u\n", c->snd.una, hdr.ack, c->snd.una + c->sndbuf.used);
1276 advanced = seqdiff(hdr.ack, c->snd.una);
1280 if(c->rtt_start.tv_sec) {
1281 if(c->rtt_seq == hdr.ack) {
1282 struct timespec now;
1283 clock_gettime(UTCP_CLOCK, &now);
1284 int32_t diff = timespec_diff_usec(&now, &c->rtt_start);
1285 update_rtt(c, diff);
1286 c->rtt_start.tv_sec = 0;
1287 } else if(c->rtt_seq < hdr.ack) {
1288 debug(c, "cancelling RTT measurement: %u < %u\n", c->rtt_seq, hdr.ack);
1289 c->rtt_start.tv_sec = 0;
1293 int32_t data_acked = advanced;
1301 // TODO: handle FIN as well.
1306 assert(data_acked >= 0);
1309 int32_t bufused = seqdiff(c->snd.last, c->snd.una);
1310 assert(data_acked <= bufused);
1314 buffer_discard(&c->sndbuf, data_acked);
1317 // Also advance snd.nxt if possible
1318 if(seqdiff(c->snd.nxt, hdr.ack) < 0) {
1319 c->snd.nxt = hdr.ack;
1322 c->snd.una = hdr.ack;
1325 if(c->dupack >= 3) {
1326 debug(c, "fast recovery ended\n");
1327 c->snd.cwnd = c->snd.ssthresh;
1333 // Increase the congestion window according to RFC 5681
1334 if(c->snd.cwnd < c->snd.ssthresh) {
1335 c->snd.cwnd += min(advanced, utcp->mss); // eq. 2
1337 c->snd.cwnd += max(1, (utcp->mss * utcp->mss) / c->snd.cwnd); // eq. 3
1340 if(c->snd.cwnd > c->sndbuf.maxsize) {
1341 c->snd.cwnd = c->sndbuf.maxsize;
1346 // Check if we have sent a FIN that is now ACKed.
1349 if(c->snd.una == c->snd.last) {
1350 set_state(c, FIN_WAIT_2);
1356 if(c->snd.una == c->snd.last) {
1357 clock_gettime(UTCP_CLOCK, &c->conn_timeout);
1358 c->conn_timeout.tv_sec += utcp->timeout;
1359 set_state(c, TIME_WAIT);
1372 if(c->snd.una == c->snd.last) {
1373 stop_retransmit_timer(c);
1374 timespec_clear(&c->conn_timeout);
1379 // 5. Process SYN stuff
1385 // This is a SYNACK. It should always have ACKed the SYN.
1390 c->rcv.irs = hdr.seq;
1391 c->rcv.nxt = hdr.seq + 1;
1395 set_state(c, FIN_WAIT_1);
1397 set_state(c, ESTABLISHED);
1403 // This is a retransmit of a SYN, send back the SYNACK.
1413 // This could be a retransmission. Ignore the SYN flag, but send an ACK back.
1424 // 6. Process new data
1426 if(c->state == SYN_RECEIVED) {
1427 // This is the ACK after the SYNACK. It should always have ACKed the SYNACK.
1432 // Are we still LISTENing?
1434 utcp->accept(c, c->src);
1437 if(c->state != ESTABLISHED) {
1438 set_state(c, CLOSED);
1448 // This should never happen.
1460 // We already closed the connection and are not interested in more data.
1470 // Ehm no, We should never receive more data after a FIN.
1480 handle_incoming_data(c, &hdr, ptr, len);
1483 // 7. Process FIN stuff
1489 // This should never happen.
1496 set_state(c, CLOSE_WAIT);
1500 set_state(c, CLOSING);
1504 clock_gettime(UTCP_CLOCK, &c->conn_timeout);
1505 c->conn_timeout.tv_sec += utcp->timeout;
1506 set_state(c, TIME_WAIT);
1513 // Ehm, no. We should never receive a second FIN.
1523 // FIN counts as one sequence number
1527 // Inform the application that the peer closed its end of the connection.
1530 c->recv(c, NULL, 0);
1534 // Now we send something back if:
1535 // - we received data, so we have to send back an ACK
1536 // -> sendatleastone = true
1537 // - or we got an ack, so we should maybe send a bit more data
1538 // -> sendatleastone = false
1540 if(hdr.ctl & SYN || hdr.ctl & FIN) {
1555 hdr.ack = hdr.seq + len;
1557 hdr.ctl = RST | ACK;
1560 print_packet(c, "send", &hdr, sizeof(hdr));
1561 utcp->send(utcp, &hdr, sizeof(hdr));
// Shut down one or both directions of the connection (UTCP_SHUT_RD/WR/RDWR).
// Reads are simply ignored from then on; shutting down writes queues a FIN
// and moves through the usual TCP closing states. Returns 0 or -1 + errno
// (error paths elided).
1566 int utcp_shutdown(struct utcp_connection *c, int dir) {
1567 debug(c, "shutdown %d at %u\n", dir, c ? c->snd.last : 0);
1575 debug(c, "shutdown() called on closed connection\n");
1580 if(!(dir == UTCP_SHUT_RD || dir == UTCP_SHUT_WR || dir == UTCP_SHUT_RDWR)) {
1585 // TCP does not have a provision for stopping incoming packets.
1586 // The best we can do is to just ignore them.
1587 if(dir == UTCP_SHUT_RD || dir == UTCP_SHUT_RDWR) {
1591 // The rest of the code deals with shutting down writes.
1592 if(dir == UTCP_SHUT_RD) {
1596 // Only process shutting down writes once.
1614 set_state(c, FIN_WAIT_1);
1622 set_state(c, CLOSING);
// Make sure the FIN actually gets (re)sent.
1635 if(!timespec_isset(&c->rtrx_timeout)) {
1636 start_retransmit_timer(c);
// Abort the connection locally: flush both buffers, go to CLOSED, and send
// a RST to the peer (unless the state makes that unnecessary -- elided).
// Returns false on invalid use (elided), true otherwise.
1642 static bool reset_connection(struct utcp_connection *c) {
1649 debug(c, "abort() called on closed connection\n");
1654 buffer_clear(&c->sndbuf);
1655 buffer_clear(&c->rcvbuf);
1666 set_state(c, CLOSED);
1674 set_state(c, CLOSED);
// Build and emit the RST packet.
1684 hdr.seq = c->snd.nxt;
1685 hdr.ack = c->rcv.nxt;
1690 print_packet(c, "send", &hdr, sizeof(hdr));
1691 c->utcp->send(c->utcp, &hdr, sizeof(hdr));
// Mark a connection for reaping; any externally supplied buffer storage is
// handed back to internal storage first so the caller may reuse its memory.
1695 static void set_reapable(struct utcp_connection *c) {
1696 set_buffer_storage(&c->sndbuf, NULL, min(c->sndbuf.maxsize, DEFAULT_MAXSNDBUFSIZE));
1697 set_buffer_storage(&c->rcvbuf, NULL, min(c->rcvbuf.maxsize, DEFAULT_MAXRCVBUFSIZE));
1703 // Resets all connections, but does not invalidate connection handles
// Each live connection is reset and its application is notified via the
// recv callback with (NULL, 0), the conventional "connection gone" signal.
1704 void utcp_reset_all_connections(struct utcp *utcp) {
1710 for(int i = 0; i < utcp->nconnections; i++) {
1711 struct utcp_connection *c = utcp->connections[i];
// Skip connections that are already dead or awaiting reaping.
1713 if(c->reapable || c->state == CLOSED) {
1717 reset_connection(c);
1721 c->recv(c, NULL, 0);
// Close both directions and mark the connection reapable (elided tail).
// A shutdown failure other than "not connected" is propagated to the caller.
1728 int utcp_close(struct utcp_connection *c) {
1729 if(utcp_shutdown(c, SHUT_RDWR) && errno != ENOTCONN) {
// Hard-abort the connection (RST to the peer); returns -1 when the reset
// itself was invalid, with the reapable marking in the elided tail.
1737 int utcp_abort(struct utcp_connection *c) {
1738 if(!reset_connection(c)) {
1747 * One call to this function will loop through all connections,
1748 * checking if something needs to be resent or not.
1749 * The return value is the time until the next timeout, as a struct
1750 * timespec (negative/zero when a timeout is already due).
1752 struct timespec utcp_timeout(struct utcp *utcp) {
1753 struct timespec now;
1754 clock_gettime(UTCP_CLOCK, &now);
// Default horizon: look again in at most one hour.
1755 struct timespec next = {now.tv_sec + 3600, now.tv_nsec};
1757 for(int i = 0; i < utcp->nconnections; i++) {
1758 struct utcp_connection *c = utcp->connections[i];
1764 // delete connections that have been utcp_close()d.
1765 if(c->state == CLOSED) {
1767 debug(c, "reaping\n");
// A connection (user) timeout fired: tear the connection down and tell
// the application via recv(NULL, 0).
1775 if(timespec_isset(&c->conn_timeout) && timespec_lt(&c->conn_timeout, &now)) {
1778 buffer_clear(&c->sndbuf);
1779 buffer_clear(&c->rcvbuf);
1782 c->recv(c, NULL, 0);
1788 if(timespec_isset(&c->rtrx_timeout) && timespec_lt(&c->rtrx_timeout, &now)) {
1789 debug(c, "retransmitting after timeout\n");
// Track the earliest pending timeout across all connections.
1793 if(timespec_isset(&c->conn_timeout) && timespec_lt(&c->conn_timeout, &next)) {
1794 next = c->conn_timeout;
1797 if(timespec_isset(&c->rtrx_timeout) && timespec_lt(&c->rtrx_timeout, &next)) {
1798 next = c->rtrx_timeout;
1802 struct timespec diff;
1804 timespec_sub(&next, &now, &diff);
// True while any connection is in a state other than CLOSED or TIME_WAIT,
// i.e. the stack still has real work to do.
1809 bool utcp_is_active(struct utcp *utcp) {
1814 for(int i = 0; i < utcp->nconnections; i++)
1815 if(utcp->connections[i]->state != CLOSED && utcp->connections[i]->state != TIME_WAIT) {
// Create a utcp stack instance. `send` transmits raw packets; `accept` and
// `listen` gate incoming connections; `priv` is an opaque user pointer.
// Returns NULL on allocation failure (elided).
1822 struct utcp *utcp_init(utcp_accept_t accept, utcp_listen_t listen, utcp_send_t send, void *priv) {
1828 struct utcp *utcp = calloc(1, sizeof(*utcp));
1834 utcp_set_mtu(utcp, DEFAULT_MTU);
// Determine the timer granularity once per process, unless the application
// already set it via utcp_set_clock_granularity().
1841 if(!CLOCK_GRANULARITY) {
1842 struct timespec res;
1843 clock_getres(UTCP_CLOCK, &res);
1844 CLOCK_GRANULARITY = res.tv_sec * USEC_PER_SEC + res.tv_nsec / 1000;
1847 utcp->accept = accept;
1848 utcp->listen = listen;
1851 utcp->timeout = DEFAULT_USER_TIMEOUT; // sec
// Tear down the whole stack: notify applications of each non-reaped
// connection via recv(NULL, 0), release per-connection buffers, then free
// the connection array (and the stack itself in the elided tail).
1856 void utcp_exit(struct utcp *utcp) {
1861 for(int i = 0; i < utcp->nconnections; i++) {
1862 struct utcp_connection *c = utcp->connections[i];
1865 buffer_clear(&c->sndbuf);
1866 buffer_clear(&c->rcvbuf);
1869 c->recv(c, NULL, 0);
1873 buffer_exit(&c->rcvbuf);
1874 buffer_exit(&c->sndbuf);
1878 free(utcp->connections);
1883 uint16_t utcp_get_mtu(struct utcp *utcp) {
1884 return utcp ? utcp->mtu : 0;
1887 uint16_t utcp_get_mss(struct utcp *utcp) {
1888 return utcp ? utcp->mss : 0;
// Change the MTU; the preallocated packet scratch buffer grows along with
// it, and the MSS is derived as MTU minus the header size.
1891 void utcp_set_mtu(struct utcp *utcp, uint16_t mtu) {
// An MTU that cannot even hold the header is rejected.
1896 if(mtu <= sizeof(struct hdr)) {
1900 if(mtu > utcp->mtu) {
1901 char *new = realloc(utcp->pkt, mtu + sizeof(struct hdr));
// NOTE(review): confirm the elided lines NULL-check `new` before assigning.
1911 utcp->mss = mtu - sizeof(struct hdr);
// Force all armed timers to fire promptly: retransmission timers are set to
// "now", connection timeouts to "now + user timeout". Useful after the
// transport was known to be down. RTT measurements are invalidated and the
// RTO is reset toward START_RTO (elided).
1914 void utcp_reset_timers(struct utcp *utcp) {
1919 struct timespec now, then;
1921 clock_gettime(UTCP_CLOCK, &now);
1925 then.tv_sec += utcp->timeout;
1927 for(int i = 0; i < utcp->nconnections; i++) {
1928 struct utcp_connection *c = utcp->connections[i];
// Only touch timers that are actually armed.
1934 if(timespec_isset(&c->rtrx_timeout)) {
1935 c->rtrx_timeout = now;
1938 if(timespec_isset(&c->conn_timeout)) {
1939 c->conn_timeout = then;
1942 c->rtt_start.tv_sec = 0;
1944 if(c->rto > START_RTO) {
1950 int utcp_get_user_timeout(struct utcp *u) {
1951 return u ? u->timeout : 0;
// Set the connection (user) timeout in seconds; elided lines presumably
// guard against a NULL stack pointer -- confirm.
1954 void utcp_set_user_timeout(struct utcp *u, int timeout) {
1956 u->timeout = timeout;
1960 size_t utcp_get_sndbuf(struct utcp_connection *c) {
1961 return c ? c->sndbuf.maxsize : 0;
// Free space in the send buffer; the elided lines presumably restrict this
// to states that accept data (as utcp_get_rcvbuf_free does) -- confirm.
1964 size_t utcp_get_sndbuf_free(struct utcp_connection *c) {
1974 return buffer_free(&c->sndbuf);
// Give the send buffer caller-managed backing storage of `size` bytes
// (NULL data switches back to internal storage).
1981 void utcp_set_sndbuf(struct utcp_connection *c, void *data, size_t size) {
1986 set_buffer_storage(&c->sndbuf, data, size);
1989 size_t utcp_get_rcvbuf(struct utcp_connection *c) {
1990 return c ? c->rcvbuf.maxsize : 0;
// Free space in the receive buffer, but only while the connection can still
// receive data (ESTABLISHED or CLOSE_WAIT); otherwise 0 (elided branch).
1993 size_t utcp_get_rcvbuf_free(struct utcp_connection *c) {
1994 if(c && (c->state == ESTABLISHED || c->state == CLOSE_WAIT)) {
1995 return buffer_free(&c->rcvbuf);
// Give the receive buffer caller-managed backing storage of `size` bytes
// (NULL data switches back to internal storage).
2001 void utcp_set_rcvbuf(struct utcp_connection *c, void *data, size_t size) {
2006 set_buffer_storage(&c->rcvbuf, data, size);
2009 size_t utcp_get_sendq(struct utcp_connection *c) {
2010 return c->sndbuf.used;
2013 size_t utcp_get_recvq(struct utcp_connection *c) {
2014 return c->rcvbuf.used;
2017 bool utcp_get_nodelay(struct utcp_connection *c) {
2018 return c ? c->nodelay : false;
// Enable/disable delayed sending; elided lines presumably guard NULL c.
2021 void utcp_set_nodelay(struct utcp_connection *c, bool nodelay) {
2023 c->nodelay = nodelay;
2027 bool utcp_get_keepalive(struct utcp_connection *c) {
2028 return c ? c->keepalive : false;
// Enable/disable keepalives; elided lines presumably guard NULL c.
2031 void utcp_set_keepalive(struct utcp_connection *c, bool keepalive) {
2033 c->keepalive = keepalive;
2037 size_t utcp_get_outq(struct utcp_connection *c) {
2038 return c ? seqdiff(c->snd.nxt, c->snd.una) : 0;
// Replace the application's receive callback for this connection.
2041 void utcp_set_recv_cb(struct utcp_connection *c, utcp_recv_t recv) {
// Replace the stack-level accept and listen callbacks; elided lines
// presumably guard against a NULL utcp pointer -- confirm.
2047 void utcp_set_accept_cb(struct utcp *utcp, utcp_accept_t accept, utcp_listen_t listen) {
2049 utcp->accept = accept;
2050 utcp->listen = listen;
// Tell utcp whether the application currently expects traffic: arms the
// connection timeout when expecting, and clears it when not (but only once
// all sent data has been ACKed). Only meaningful in data-carrying states.
2054 void utcp_expect_data(struct utcp_connection *c, bool expect) {
2055 if(!c || c->reapable) {
2059 if(!(c->state == ESTABLISHED || c->state == FIN_WAIT_1 || c->state == FIN_WAIT_2)) {
2064 // If we expect data, start the connection timer.
// Only arm it if it is not already running.
2065 if(!timespec_isset(&c->conn_timeout)) {
2066 clock_gettime(UTCP_CLOCK, &c->conn_timeout);
2067 c->conn_timeout.tv_sec += c->utcp->timeout;
2070 // If we want to cancel expecting data, only clear the timer when there is no unACKed data.
2071 if(c->snd.una == c->snd.last) {
2072 timespec_clear(&c->conn_timeout);
2077 void utcp_set_flags(struct utcp_connection *c, uint32_t flags) {
2078 c->flags &= ~UTCP_CHANGEABLE_FLAGS;
2079 c->flags |= flags & UTCP_CHANGEABLE_FLAGS;
// Notify the stack the transport went offline/online: toggles data
// expectation per connection, forces armed retransmission timers to "now",
// invalidates RTT measurements and resets the RTO toward START_RTO (elided).
2082 void utcp_offline(struct utcp *utcp, bool offline) {
2083 struct timespec now;
2084 clock_gettime(UTCP_CLOCK, &now);
2086 for(int i = 0; i < utcp->nconnections; i++) {
2087 struct utcp_connection *c = utcp->connections[i];
2093 utcp_expect_data(c, offline);
2096 if(timespec_isset(&c->rtrx_timeout)) {
2097 c->rtrx_timeout = now;
2100 utcp->connections[i]->rtt_start.tv_sec = 0;
2102 if(c->rto > START_RTO) {
// Override the clock granularity (microseconds) used in RTO computation,
// instead of the value probed via clock_getres() in utcp_init().
2109 void utcp_set_clock_granularity(long granularity) {
2110 CLOCK_GRANULARITY = granularity;