2 utcp.c -- Userspace TCP
3 Copyright (C) 2014-2017 Guus Sliepen <guus@tinc-vpn.org>
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
15 You should have received a copy of the GNU General Public License along
16 with this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
23 #include "utcp_priv.h"
38 #if defined(CLOCK_MONOTONIC_RAW) && defined(__x86_64__)
39 #define UTCP_CLOCK CLOCK_MONOTONIC_RAW
41 #define UTCP_CLOCK CLOCK_MONOTONIC
45 static void timespec_sub(const struct timespec *a, const struct timespec *b, struct timespec *r) {
46 r->tv_sec = a->tv_sec - b->tv_sec;
47 r->tv_nsec = a->tv_nsec - b->tv_nsec;
50 r->tv_sec--, r->tv_nsec += NSEC_PER_SEC;
54 static int32_t timespec_diff_usec(const struct timespec *a, const struct timespec *b) {
55 return (a->tv_sec - b->tv_sec) * 1000000 + (a->tv_nsec - b->tv_nsec) / 1000;
// Strict less-than comparison of two timespecs: seconds first, then
// nanoseconds as the tie-breaker.
static bool timespec_lt(const struct timespec *a, const struct timespec *b) {
	if(a->tv_sec != b->tv_sec) {
		return a->tv_sec < b->tv_sec;
	}

	return a->tv_nsec < b->tv_nsec;
}
// Reset a timespec to the "unset" state (all zero); see timespec_isset().
static void timespec_clear(struct timespec *a) {
	a->tv_sec = 0;
	a->tv_nsec = 0;
}
// A timer is considered "set" when its seconds field is nonzero; timers are
// armed via clock_gettime(), which never yields tv_sec == 0 on a live system.
static bool timespec_isset(const struct timespec *a) {
	return a->tv_sec != 0;
}
75 static long CLOCK_GRANULARITY; // usec
// Smaller of two sizes.
static inline size_t min(size_t a, size_t b) {
	if(a < b) {
		return a;
	}

	return b;
}
// Larger of two sizes.
static inline size_t max(size_t a, size_t b) {
	if(a > b) {
		return a;
	}

	return b;
}
88 #ifndef UTCP_DEBUG_DATALEN
89 #define UTCP_DEBUG_DATALEN 20
// Emit a single debug line to stderr, prefixed with a wall-clock timestamp
// ("sec.usec") and the connection's "src:dst" ports (0:0 when c is NULL).
// The whole line is written with one fwrite() so output interleaves less.
// NOTE(review): the listing is elided here; the declarations of tv, buf, len
// and ap, plus va_end(), are not visible in this view.
92 static void debug(struct utcp_connection *c, const char *format, ...) {
97 clock_gettime(CLOCK_REALTIME, &tv);
98 len = snprintf(buf, sizeof(buf), "%ld.%06lu %u:%u ", (long)tv.tv_sec, tv.tv_nsec / 1000, c ? c->src : 0, c ? c->dst : 0);
100 va_start(ap, format);
101 len += vsnprintf(buf + len, sizeof(buf) - len, format, ap);
// Only emit complete, untruncated lines; drop the output on error/overflow.
104 if(len > 0 && (size_t)len < sizeof(buf)) {
105 fwrite(buf, len, 1, stderr);
// Pretty-print a packet for debugging: header fields, decoded control flags,
// and up to UTCP_DEBUG_DATALEN payload bytes rendered as uppercase hex.
// `dir` is a caller-supplied tag such as "send" or "recv".
109 static void print_packet(struct utcp_connection *c, const char *dir, const void *pkt, size_t len) {
112 if(len < sizeof(hdr)) {
113 debug(c, "%s: short packet (%lu bytes)\n", dir, (unsigned long)len);
// Copy to a local struct hdr: the packet buffer may be unaligned.
117 memcpy(&hdr, pkt, sizeof(hdr));
121 if(len > sizeof(hdr)) {
122 datalen = min(len - sizeof(hdr), UTCP_DEBUG_DATALEN);
128 const uint8_t *data = (uint8_t *)pkt + sizeof(hdr);
// VLA: two hex digits per byte plus the NUL terminator.
129 char str[datalen * 2 + 1];
132 for(uint32_t i = 0; i < datalen; i++) {
133 *p++ = "0123456789ABCDEF"[data[i] >> 4];
134 *p++ = "0123456789ABCDEF"[data[i] & 15];
139 debug(c, "%s: len %lu src %u dst %u seq %u ack %u wnd %u aux %x ctl %s%s%s%s%s data %s\n",
140 dir, (unsigned long)len, hdr.src, hdr.dst, hdr.seq, hdr.ack, hdr.wnd, hdr.aux,
141 hdr.ctl & SYN ? "SYN" : "",
142 hdr.ctl & RST ? "RST" : "",
143 hdr.ctl & FIN ? "FIN" : "",
144 hdr.ctl & ACK ? "ACK" : "",
145 hdr.ctl & MF ? "MF" : "",
// Log the current congestion window and slow-start threshold.
// ssthresh == ~0 is the "unset" sentinel and is printed as 0.
150 static void debug_cwnd(struct utcp_connection *c) {
151 debug(c, "snd.cwnd %u snd.ssthresh %u\n", c->snd.cwnd, ~c->snd.ssthresh ? c->snd.ssthresh : 0);
154 #define debug(...) do {} while(0)
155 #define print_packet(...) do {} while(0)
156 #define debug_cwnd(...) do {} while(0)
// Transition a connection to a new TCP state and log it.
// Reaching ESTABLISHED disarms the connection (handshake) timeout.
159 static void set_state(struct utcp_connection *c, enum state state) {
162 if(state == ESTABLISHED) {
163 timespec_clear(&c->conn_timeout);
166 debug(c, "state %s\n", strstate[state]);
// Whether a FIN flag should accompany a segment ending at `seq`.
// Only the final byte of the stream (seq == snd.last) can carry the FIN;
// NOTE(review): the elided remainder presumably also checks that the
// connection is in a closing state — confirm against the full source.
169 static bool fin_wanted(struct utcp_connection *c, uint32_t seq) {
170 if(seq != c->snd.last) {
185 static bool is_reliable(struct utcp_connection *c) {
186 return c->flags & UTCP_RELIABLE;
// Signed distance between two 32-bit sequence numbers, correct across
// wraparound: the unsigned subtraction wraps mod 2^32 and is then
// reinterpreted as a signed offset.
static int32_t seqdiff(uint32_t a, uint32_t b) {
	return a - b;
}
194 static bool buffer_wraps(struct buffer *buf) {
195 return buf->size - buf->offset < buf->used;
// Grow (or shrink) the internal storage of a ring buffer to `newsize`.
// Only valid for internally-allocated buffers (asserted). If the live data
// wraps, the tail segment is shifted so it ends flush against the new end of
// the storage, keeping the ring layout consistent.
// NOTE(review): the realloc-failure handling and the assignment of newdata
// back into buf are elided from this listing.
198 static bool buffer_resize(struct buffer *buf, uint32_t newsize) {
199 assert(!buf->external);
209 char *newdata = realloc(buf->data, newsize);
217 if(buffer_wraps(buf)) {
218 // Shift the right part of the buffer until it hits the end of the new buffer.
222 // [345.........|........012]
223 uint32_t tailsize = buf->size - buf->offset;
224 uint32_t newoffset = newsize - tailsize;
225 memmove(buf->data + newoffset, buf->data + buf->offset, tailsize);
226 buf->offset = newoffset;
233 // Store data into the buffer
// Write `len` bytes at logical position `offset` in the ring buffer, growing
// the storage (up to maxsize) as needed. Writes beyond maxsize are truncated;
// an offset at/past maxsize stores nothing. Extends buf->used when the write
// ends past the current end of data. Returns the number of bytes stored
// (per the visible tail; the early-return paths are elided here).
234 static ssize_t buffer_put_at(struct buffer *buf, size_t offset, const void *data, size_t len) {
235 debug(NULL, "buffer_put_at %lu %lu %lu\n", (unsigned long)buf->used, (unsigned long)offset, (unsigned long)len);
237 // Ensure we don't store more than maxsize bytes in total
238 size_t required = offset + len;
240 if(required > buf->maxsize) {
241 if(offset >= buf->maxsize) {
245 len = buf->maxsize - offset;
246 required = buf->maxsize;
249 // Check if we need to resize the buffer
250 if(required > buf->size) {
// Double the size until the requirement fits, capped at maxsize.
251 size_t newsize = buf->size;
259 } while(newsize < required);
261 if(newsize > buf->maxsize) {
262 newsize = buf->maxsize;
265 if(!buffer_resize(buf, newsize)) {
// Translate the logical offset into a physical position in the ring.
270 uint32_t realoffset = buf->offset + offset;
272 if(buf->size - buf->offset <= offset) {
273 // The offset wrapped
274 realoffset -= buf->size;
277 if(buf->size - realoffset < len) {
278 // The new chunk of data must be wrapped
279 memcpy(buf->data + realoffset, data, buf->size - realoffset);
280 memcpy(buf->data, (char *)data + buf->size - realoffset, len - (buf->size - realoffset));
282 memcpy(buf->data + realoffset, data, len);
285 if(required > buf->used) {
286 buf->used = required;
// Append data at the end of the buffered region (offset == buf->used).
292 static ssize_t buffer_put(struct buffer *buf, const void *data, size_t len) {
293 return buffer_put_at(buf, buf->used, data, len);
296 // Copy data from the buffer without removing it.
// Copy up to `len` bytes starting at logical `offset` out of the ring buffer
// into `data`, without consuming them. Requests past the stored data are
// clamped; a wrapped region is copied in two memcpy() calls.
297 static ssize_t buffer_copy(struct buffer *buf, void *data, size_t offset, size_t len) {
298 // Ensure we don't copy more than is actually stored in the buffer
299 if(offset >= buf->used) {
303 if(buf->used - offset < len) {
304 len = buf->used - offset;
// Translate the logical offset into a physical ring position.
307 uint32_t realoffset = buf->offset + offset;
309 if(buf->size - buf->offset <= offset) {
310 // The offset wrapped
311 realoffset -= buf->size;
314 if(buf->size - realoffset < len) {
315 // The data is wrapped
316 memcpy(data, buf->data + realoffset, buf->size - realoffset);
317 memcpy((char *)data + buf->size - realoffset, buf->data, len - (buf->size - realoffset));
319 memcpy(data, buf->data + realoffset, len);
325 // Copy data from the buffer without removing it.
// Like buffer_copy(), but instead of copying into a caller buffer, feed the
// bytes to the application's receive callback c->recv(), in at most two
// chunks when the region wraps. The data is not consumed here.
326 static ssize_t buffer_call(struct utcp_connection *c, struct buffer *buf, size_t offset, size_t len) {
331 // Ensure we don't copy more than is actually stored in the buffer
332 if(offset >= buf->used) {
336 if(buf->used - offset < len) {
337 len = buf->used - offset;
340 uint32_t realoffset = buf->offset + offset;
342 if(buf->size - buf->offset <= offset) {
343 // The offset wrapped
344 realoffset -= buf->size;
347 if(buf->size - realoffset < len) {
348 // The data is wrapped
349 ssize_t rx1 = c->recv(c, buf->data + realoffset, buf->size - realoffset);
// Stop early if the callback did not accept the whole first chunk.
351 if(rx1 < buf->size - realoffset) {
355 // The channel might have been closed by the previous callback
360 ssize_t rx2 = c->recv(c, buf->data, len - (buf->size - realoffset));
368 return c->recv(c, buf->data + realoffset, len);
372 // Discard data from the buffer.
// Drop `len` bytes from the front of the ring buffer by advancing `offset`.
// NOTE(review): this listing is elided; as shown, the wrap branch subtracts
// buf->size from buf->offset before the advance is visible — the intervening
// offset-advance line is presumably among the elided lines. Verify against
// the full source.
373 static ssize_t buffer_discard(struct buffer *buf, size_t len) {
374 if(buf->used < len) {
378 if(buf->size - buf->offset <= len) {
379 buf->offset -= buf->size;
382 if(buf->used == len) {
// Empty the buffer without releasing its storage.
// NOTE(review): body elided from this listing — presumably resets used (and
// offset) to zero; confirm against the full source.
393 static void buffer_clear(struct buffer *buf) {
// Configure buffer limits: fail if maxsize < minsize, record the cap, and
// make sure at least `minsize` bytes of storage are available (growing the
// internal storage if needed).
398 static bool buffer_set_size(struct buffer *buf, uint32_t minsize, uint32_t maxsize) {
399 if(maxsize < minsize) {
403 buf->maxsize = maxsize;
405 return buf->size >= minsize || buffer_resize(buf, minsize);
// Copy the live contents of `buf` into fresh storage `newdata`, linearizing
// a wrapped region into one contiguous run starting at offset 0.
// NOTE(review): the assignment of newdata/newsize into buf and the offset
// reset are elided from this listing.
408 static void buffer_transfer(struct buffer *buf, char *newdata, size_t newsize) {
409 if(buffer_wraps(buf)) {
// Wrapped: tail segment first, then the part at the front of the old storage.
414 uint32_t tailsize = buf->size - buf->offset;
415 memcpy(newdata, buf->data + buf->offset, tailsize);
416 memcpy(newdata + tailsize, buf->data, buf->used - tailsize);
422 memcpy(newdata, buf->data + buf->offset, buf->used);
// Switch a buffer between caller-provided ("external") storage and
// internally-allocated storage, migrating any live data:
//  - data != NULL: adopt the external storage (rejected if current contents
//    would not fit, or — per the visible comments — if already external).
//  - data == NULL and currently external: allocate internal storage and copy
//    the contents back in.
//  - data == NULL and internal: opportunistically shrink via realloc, but
//    only when the data does not wrap.
// NOTE(review): several branches and the trailing size/flag updates are
// elided from this listing.
429 static void set_buffer_storage(struct buffer *buf, char *data, size_t size) {
430 if(size > UINT32_MAX) {
438 // Don't allow resizing an external buffer
442 if(size < buf->used) {
443 // Ignore requests for an external buffer if we are already using more than it can store
447 // Transition from internal to external buffer
448 buffer_transfer(buf, data, size);
451 buf->external = true;
452 } else if(buf->external) {
453 // Transition from external to internal buf
454 size_t minsize = buf->used <= DEFAULT_SNDBUFSIZE ? DEFAULT_SNDBUFSIZE : buf->used;
457 data = malloc(minsize);
460 // Cannot handle this
464 buffer_transfer(buf, data, minsize);
471 buf->external = false;
473 // Don't do anything if the buffer wraps
474 if(buffer_wraps(buf)) {
478 // Realloc internal storage
479 size_t minsize = max(DEFAULT_SNDBUFSIZE, buf->offset + buf->used);
482 data = realloc(buf->data, minsize);
// Release a buffer's resources and zero the whole struct so it is safe to
// reuse or double-free-check. (The free() of internal storage is elided
// from this listing.)
496 static void buffer_exit(struct buffer *buf) {
501 memset(buf, 0, sizeof(*buf));
504 static uint32_t buffer_free(const struct buffer *buf) {
505 return buf->maxsize > buf->used ? buf->maxsize - buf->used : 0;
508 // Connections are stored in a sorted list.
509 // This gives O(log(N)) lookup time, O(N log(N)) insertion time and O(N) deletion time.
// qsort()/bsearch() comparator for the sorted connection array: orders by
// src port, then dst port. Arguments are pointers to struct utcp_connection*.
511 static int compare(const void *va, const void *vb) {
514 const struct utcp_connection *a = *(struct utcp_connection **)va;
515 const struct utcp_connection *b = *(struct utcp_connection **)vb;
// Widen to int so the u16 subtraction cannot wrap.
519 int c = (int)a->src - (int)b->src;
525 c = (int)a->dst - (int)b->dst;
// Look up the connection matching (src, dst) via binary search over the
// sorted connection array; NULL when absent or when there are no connections.
529 static struct utcp_connection *find_connection(const struct utcp *utcp, uint16_t src, uint16_t dst) {
530 if(!utcp->nconnections) {
// Search with a stack key so no allocation is needed.
534 struct utcp_connection key = {
538 struct utcp_connection **match = bsearch(&keyp, utcp->connections, utcp->nconnections, sizeof(*utcp->connections), compare);
539 return match ? *match : NULL;
// Remove a connection from the sorted array (compacting it with memmove),
// then release its buffers. (The free() of the connection struct itself is
// elided from this listing.)
542 static void free_connection(struct utcp_connection *c) {
543 struct utcp *utcp = c->utcp;
544 struct utcp_connection **cp = bsearch(&c, utcp->connections, utcp->nconnections, sizeof(*utcp->connections), compare);
548 int i = cp - utcp->connections;
549 memmove(cp, cp + 1, (utcp->nconnections - i - 1) * sizeof(*cp));
550 utcp->nconnections--;
552 buffer_exit(&c->rcvbuf);
553 buffer_exit(&c->sndbuf);
// Create a new connection object for (src, dst):
//  - src != 0: fail if the pair is already in use;
//  - src == 0: pick a random free port with the high bit set (the 32767
//    connection cap guarantees the search terminates).
// Grows the connection array geometrically, allocates send/receive buffers,
// seeds the initial send state (RFC 5681 initial cwnd by MSS class), and
// re-sorts the array so find_connection() keeps working.
557 static struct utcp_connection *allocate_connection(struct utcp *utcp, uint16_t src, uint16_t dst) {
558 // Check whether this combination of src and dst is free
561 if(find_connection(utcp, src, dst)) {
565 } else { // If src == 0, generate a random port number with the high bit set
566 if(utcp->nconnections >= 32767) {
571 src = rand() | 0x8000;
573 while(find_connection(utcp, src, dst)) {
578 // Allocate memory for the new connection
580 if(utcp->nconnections >= utcp->nallocated) {
581 if(!utcp->nallocated) {
582 utcp->nallocated = 4;
584 utcp->nallocated *= 2;
587 struct utcp_connection **new_array = realloc(utcp->connections, utcp->nallocated * sizeof(*utcp->connections));
593 utcp->connections = new_array;
596 struct utcp_connection *c = calloc(1, sizeof(*c));
602 if(!buffer_set_size(&c->sndbuf, DEFAULT_SNDBUFSIZE, DEFAULT_MAXSNDBUFSIZE)) {
// Clean up the already-created send buffer on partial failure.
607 if(!buffer_set_size(&c->rcvbuf, DEFAULT_RCVBUFSIZE, DEFAULT_MAXRCVBUFSIZE)) {
608 buffer_exit(&c->sndbuf);
613 // Fill in the details
622 c->snd.una = c->snd.iss;
623 c->snd.nxt = c->snd.iss + 1;
624 c->snd.last = c->snd.nxt;
// RFC 5681-style initial window: 2, 3 or 4 segments depending on MSS.
625 c->snd.cwnd = (utcp->mss > 2190 ? 2 : utcp->mss > 1095 ? 3 : 4) * utcp->mss;
// ~0 acts as the "no threshold yet" sentinel (see debug_cwnd()).
626 c->snd.ssthresh = ~0;
633 // Add it to the sorted list of connections
635 utcp->connections[utcp->nconnections++] = c;
636 qsort(utcp->connections, utcp->nconnections, sizeof(*utcp->connections), compare);
// Absolute difference of two unsigned 32-bit values without underflow.
static inline uint32_t absdiff(uint32_t a, uint32_t b) {
	return a > b ? a - b : b - a;
}
649 // Update RTT variables. See RFC 6298.
// Fold a new RTT sample (in usec) into the smoothed estimators and recompute
// the retransmission timeout, per RFC 6298:
//   rttvar <- 3/4 rttvar + 1/4 |srtt - rtt|
//   srtt   <- 7/8 srtt   + 1/8 rtt
//   rto    <- srtt + max(4 * rttvar, clock granularity), capped at MAX_RTO.
// NOTE(review): the first-sample initialization branch is elided here.
650 static void update_rtt(struct utcp_connection *c, uint32_t rtt) {
652 debug(c, "invalid rtt\n");
660 c->rttvar = (c->rttvar * 3 + absdiff(c->srtt, rtt)) / 4;
661 c->srtt = (c->srtt * 7 + rtt) / 8;
664 c->rto = c->srtt + max(4 * c->rttvar, CLOCK_GRANULARITY);
666 if(c->rto > MAX_RTO) {
670 debug(c, "rtt %u srtt %u rttvar %u rto %u\n", rtt, c->srtt, c->rttvar, c->rto);
// (Re)arm the retransmission timer at now + rto, converting the usec-denominated
// rto into the timespec by hand and normalizing tv_nsec into [0, 1s).
673 static void start_retransmit_timer(struct utcp_connection *c) {
674 clock_gettime(UTCP_CLOCK, &c->rtrx_timeout);
676 uint32_t rto = c->rto;
// Peel off whole seconds first.
678 while(rto > USEC_PER_SEC) {
679 c->rtrx_timeout.tv_sec++;
683 c->rtrx_timeout.tv_nsec += rto * 1000;
685 if(c->rtrx_timeout.tv_nsec >= NSEC_PER_SEC) {
686 c->rtrx_timeout.tv_nsec -= NSEC_PER_SEC;
687 c->rtrx_timeout.tv_sec++;
690 debug(c, "rtrx_timeout %ld.%06lu\n", c->rtrx_timeout.tv_sec, c->rtrx_timeout.tv_nsec);
// Disarm the retransmission timer (zeroed timespec == "not set").
693 static void stop_retransmit_timer(struct utcp_connection *c) {
694 timespec_clear(&c->rtrx_timeout);
695 debug(c, "rtrx_timeout cleared\n");
// Active open: allocate a connection to `dst`, send the initial SYN carrying
// the 4-byte init auxiliary header (aux 0x0101) with the connection flags in
// its last byte, enter SYN_SENT, and arm both the connection timeout and the
// retransmission timer. Returns the new connection (elided NULL-check paths
// handle allocation failure).
698 struct utcp_connection *utcp_connect_ex(struct utcp *utcp, uint16_t dst, utcp_recv_t recv, void *priv, uint32_t flags) {
699 struct utcp_connection *c = allocate_connection(utcp, 0, dst);
// Only the low 5 flag bits are defined.
705 assert((flags & ~0x1f) == 0);
716 pkt.hdr.src = c->src;
717 pkt.hdr.dst = c->dst;
718 pkt.hdr.seq = c->snd.iss;
// Advertise our full receive buffer as the initial window.
720 pkt.hdr.wnd = c->rcvbuf.maxsize;
722 pkt.hdr.aux = 0x0101;
726 pkt.init[3] = flags & 0x7;
728 set_state(c, SYN_SENT);
730 print_packet(c, "send", &pkt, sizeof(pkt));
731 utcp->send(utcp, &pkt, sizeof(pkt));
733 clock_gettime(UTCP_CLOCK, &c->conn_timeout);
734 c->conn_timeout.tv_sec += utcp->timeout;
736 start_retransmit_timer(c);
741 struct utcp_connection *utcp_connect(struct utcp *utcp, uint16_t dst, utcp_recv_t recv, void *priv) {
742 return utcp_connect_ex(utcp, dst, recv, priv, UTCP_TCP);
745 void utcp_accept(struct utcp_connection *c, utcp_recv_t recv, void *priv) {
746 if(c->reapable || c->state != SYN_RECEIVED) {
747 debug(c, "accept() called on invalid connection in state %s\n", c, strstate[c->state]);
751 debug(c, "accepted %p %p\n", c, recv, priv);
755 set_state(c, ESTABLISHED);
// Transmit pending data (and/or a bare ACK). `left` is how much unsent data
// remains; for reliable connections the amount actually sent is limited by
// the smaller of the congestion window and the peer's receive window, minus
// what is already in flight. With sendatleastone set, at least an empty ACK
// segment goes out. Segments are filled from the send buffer, FIN is added
// on the final byte when appropriate, and an RTT measurement is started if
// none is running.
758 static void ack(struct utcp_connection *c, bool sendatleastone) {
759 int32_t left = seqdiff(c->snd.last, c->snd.nxt);
760 int32_t cwndleft = is_reliable(c) ? min(c->snd.cwnd, c->snd.wnd) - seqdiff(c->snd.nxt, c->snd.una) : MAX_UNRELIABLE_SIZE;
766 } else if(cwndleft < left) {
// Send only whole MSS-sized segments unless we must send something.
769 if(!sendatleastone || cwndleft > c->utcp->mss) {
770 left -= left % c->utcp->mss;
774 debug(c, "cwndleft %d left %d\n", cwndleft, left);
776 if(!left && !sendatleastone) {
783 } *pkt = c->utcp->pkt;
785 pkt->hdr.src = c->src;
786 pkt->hdr.dst = c->dst;
787 pkt->hdr.ack = c->rcv.nxt;
// Unreliable connections reuse wnd as the fragment offset, so advertise 0.
788 pkt->hdr.wnd = is_reliable(c) ? c->rcvbuf.maxsize : 0;
793 uint32_t seglen = left > c->utcp->mss ? c->utcp->mss : left;
794 pkt->hdr.seq = c->snd.nxt;
796 buffer_copy(&c->sndbuf, pkt->data, seqdiff(c->snd.nxt, c->snd.una), seglen);
798 c->snd.nxt += seglen;
801 if(!is_reliable(c)) {
809 if(seglen && fin_wanted(c, c->snd.nxt)) {
814 if(!c->rtt_start.tv_sec) {
815 // Start RTT measurement
816 clock_gettime(UTCP_CLOCK, &c->rtt_start);
817 c->rtt_seq = pkt->hdr.seq + seglen;
818 debug(c, "starting RTT measurement, expecting ack %u\n", c->rtt_seq);
821 print_packet(c, "send", pkt, sizeof(pkt->hdr) + seglen);
822 c->utcp->send(c->utcp, pkt, sizeof(pkt->hdr) + seglen);
// For unreliable fragmented sends, wnd tracks the running fragment offset.
824 if(left && !is_reliable(c)) {
825 pkt->hdr.wnd += seglen;
// Queue application data for transmission. Rejects closed/unconnected
// connections, optionally requires the whole write to fit when
// UTCP_NO_PARTIAL is set, buffers the data, and kicks transmission unless
// the handshake is still in progress. For unreliable connections the data is
// sent immediately and the send buffer is drained right away. Arms the
// retransmission and connection timers for reliable connections. Returns the
// number of bytes accepted (error paths elided in this listing).
830 ssize_t utcp_send(struct utcp_connection *c, const void *data, size_t len) {
832 debug(c, "send() called on closed connection\n");
840 debug(c, "send() called on unconnected connection\n");
855 debug(c, "send() called on closed connection\n");
860 // Exit early if we have nothing to send.
871 // Check if we need to be able to buffer all data
873 if(c->flags & UTCP_NO_PARTIAL) {
874 if(len > buffer_free(&c->sndbuf)) {
875 if(len > c->sndbuf.maxsize) {
885 // Add data to send buffer.
888 len = buffer_put(&c->sndbuf, data, len);
889 } else if(c->state != SYN_SENT && c->state != SYN_RECEIVED) {
// Unreliable sends must be buffered in full or not at all.
890 if(len > MAX_UNRELIABLE_SIZE || buffer_put(&c->sndbuf, data, len) != (ssize_t)len) {
909 // Don't send anything yet if the connection has not fully established yet
911 if(c->state == SYN_SENT || c->state == SYN_RECEIVED) {
917 if(!is_reliable(c)) {
// Unreliable data is fire-and-forget: mark it all sent and drop it.
918 c->snd.una = c->snd.nxt = c->snd.last;
919 buffer_discard(&c->sndbuf, c->sndbuf.used);
922 if(is_reliable(c) && !timespec_isset(&c->rtrx_timeout)) {
923 start_retransmit_timer(c);
926 if(is_reliable(c) && !timespec_isset(&c->conn_timeout)) {
927 clock_gettime(UTCP_CLOCK, &c->conn_timeout);
928 c->conn_timeout.tv_sec += c->utcp->timeout;
934 static void swap_ports(struct hdr *hdr) {
935 uint16_t tmp = hdr->src;
// Fast retransmit (RFC 5681): after three duplicate ACKs, immediately resend
// one MSS-sized segment starting at snd.una without waiting for the
// retransmission timer. Appends FIN if the resent range ends the stream.
940 static void fast_retransmit(struct utcp_connection *c) {
941 if(c->state == CLOSED || c->snd.last == c->snd.una) {
942 debug(c, "fast_retransmit() called but nothing to retransmit!\n");
946 struct utcp *utcp = c->utcp;
951 } *pkt = c->utcp->pkt;
953 pkt->hdr.src = c->src;
954 pkt->hdr.dst = c->dst;
955 pkt->hdr.wnd = c->rcvbuf.maxsize;
964 // Send unacked data again.
965 pkt->hdr.seq = c->snd.una;
966 pkt->hdr.ack = c->rcv.nxt;
968 uint32_t len = min(seqdiff(c->snd.last, c->snd.una), utcp->mss);
970 if(fin_wanted(c, c->snd.una + len)) {
// Oldest unacked data sits at offset 0 of the send buffer.
975 buffer_copy(&c->sndbuf, pkt->data, 0, len);
976 print_packet(c, "rtrx", pkt, sizeof(pkt->hdr) + len);
977 utcp->send(utcp, pkt, sizeof(pkt->hdr) + len);
// Timer-driven retransmission. Depending on state it resends the SYN
// (SYN_SENT), the SYNACK (SYN_RECEIVED), or the oldest unacked data segment
// (established/closing states), applying RFC 5681 slow start after the
// timeout (ssthresh = max(flight/2, 2*MSS), cwnd = 1 MSS). Afterwards it
// restarts the timer, doubles/caps the RTO at MAX_RTO (per the visible
// check), invalidates any running RTT measurement, and cancels fast
// recovery. Elided here: the state switch scaffolding and RTO backoff line.
985 static void retransmit(struct utcp_connection *c) {
986 if(c->state == CLOSED || c->snd.last == c->snd.una) {
987 debug(c, "retransmit() called but nothing to retransmit!\n");
988 stop_retransmit_timer(c);
992 struct utcp *utcp = c->utcp;
// Optional application hook notified on retransmission.
994 if(utcp->retransmit) {
1001 } *pkt = c->utcp->pkt;
1003 pkt->hdr.src = c->src;
1004 pkt->hdr.dst = c->dst;
1005 pkt->hdr.wnd = c->rcvbuf.maxsize;
1010 // Send our SYN again
1011 pkt->hdr.seq = c->snd.iss;
1014 pkt->hdr.aux = 0x0101;
1018 pkt->data[3] = c->flags & 0x7;
1019 print_packet(c, "rtrx", pkt, sizeof(pkt->hdr) + 4);
1020 utcp->send(utcp, pkt, sizeof(pkt->hdr) + 4);
1024 // Send SYNACK again
1025 pkt->hdr.seq = c->snd.nxt;
1026 pkt->hdr.ack = c->rcv.nxt;
1027 pkt->hdr.ctl = SYN | ACK;
1028 print_packet(c, "rtrx", pkt, sizeof(pkt->hdr));
1029 utcp->send(utcp, pkt, sizeof(pkt->hdr));
1037 // Send unacked data again.
1038 pkt->hdr.seq = c->snd.una;
1039 pkt->hdr.ack = c->rcv.nxt;
1041 uint32_t len = min(seqdiff(c->snd.last, c->snd.una), utcp->mss);
1043 if(fin_wanted(c, c->snd.una + len)) {
1045 pkt->hdr.ctl |= FIN;
1048 // RFC 5681 slow start after timeout
1049 uint32_t flightsize = seqdiff(c->snd.nxt, c->snd.una);
1050 c->snd.ssthresh = max(flightsize / 2, utcp->mss * 2); // eq. 4
1051 c->snd.cwnd = utcp->mss;
1054 buffer_copy(&c->sndbuf, pkt->data, 0, len);
1055 print_packet(c, "rtrx", pkt, sizeof(pkt->hdr) + len);
1056 utcp->send(utcp, pkt, sizeof(pkt->hdr) + len);
// Everything after the resent segment must be sent again later.
1058 c->snd.nxt = c->snd.una + len;
1065 // We shouldn't need to retransmit anything in this state.
1069 stop_retransmit_timer(c);
1073 start_retransmit_timer(c);
1076 if(c->rto > MAX_RTO) {
1080 c->rtt_start.tv_sec = 0; // invalidate RTT timer
1081 c->dupack = 0; // cancel any ongoing fast recovery
1087 /* Update receive buffer and SACK entries after consuming data.
1091 * |.....0000..1111111111.....22222......3333|
1094 * 0..3 represent the SACK entries. The ^ indicates up to which point we want
1095 * to remove data from the receive buffer. The idea is to subtract "len"
1096 * from the offset of all the SACK entries, and then remove/cut down entries
1097 * that are shifted to before the start of the receive buffer.
1099 * There are three cases:
1100 * - the SACK entry is after ^, in that case just change the offset.
1101 * - the SACK entry starts before and ends after ^, so we have to
1102 * change both its offset and size.
1103 * - the SACK entry is completely before ^, in that case delete it.
// Consume `len` bytes from the front of the receive buffer and shift all
// SACK entries left accordingly (see the diagram above): entries fully
// before the consumed point are deleted, a straddling entry is trimmed, and
// later entries just have their offset reduced.
1105 static void sack_consume(struct utcp_connection *c, size_t len) {
1106 debug(c, "sack_consume %lu\n", (unsigned long)len);
1108 if(len > c->rcvbuf.used) {
1109 debug(c, "all SACK entries consumed\n");
1110 c->sacks[0].len = 0;
1114 buffer_discard(&c->rcvbuf, len);
1116 for(int i = 0; i < NSACKS && c->sacks[i].len;) {
1117 if(len < c->sacks[i].offset) {
// Entry is entirely after the consumed region: just slide it left.
1118 c->sacks[i].offset -= len;
1120 } else if(len < c->sacks[i].offset + c->sacks[i].len) {
// Entry straddles the boundary: trim its front.
1121 c->sacks[i].len -= len - c->sacks[i].offset;
1122 c->sacks[i].offset = 0;
// Entry fully consumed: delete it by shifting the rest down.
1125 if(i < NSACKS - 1) {
1126 memmove(&c->sacks[i], &c->sacks[i + 1], (NSACKS - 1 - i) * sizeof(c->sacks)[i]);
1127 c->sacks[NSACKS - 1].len = 0;
1129 c->sacks[i].len = 0;
1135 for(int i = 0; i < NSACKS && c->sacks[i].len; i++) {
1136 debug(c, "SACK[%d] offset %u len %u\n", i, c->sacks[i].offset, c->sacks[i].len);
// Stash an out-of-order segment at `offset` past rcv.nxt in the receive
// buffer and record/merge the range into the sorted SACK entry list:
// insert a new entry, extend an adjacent one, or drop the packet when all
// NSACKS slots are taken and no merge applies.
1140 static void handle_out_of_order(struct utcp_connection *c, uint32_t offset, const void *data, size_t len) {
1141 debug(c, "out of order packet, offset %u\n", offset);
1142 // Packet loss or reordering occurred. Store the data in the buffer.
1143 ssize_t rxd = buffer_put_at(&c->rcvbuf, offset, data, len);
1146 debug(c, "packet outside receive buffer, dropping\n");
1150 if((size_t)rxd < len) {
1151 debug(c, "packet partially outside receive buffer\n");
1155 // Make note of where we put it.
1156 for(int i = 0; i < NSACKS; i++) {
1157 if(!c->sacks[i].len) { // nothing to merge, add new entry
1158 debug(c, "new SACK entry %d\n", i);
1159 c->sacks[i].offset = offset;
1160 c->sacks[i].len = rxd;
1162 } else if(offset < c->sacks[i].offset) {
1163 if(offset + rxd < c->sacks[i].offset) { // insert before
1164 if(!c->sacks[NSACKS - 1].len) { // only if room left
1165 debug(c, "insert SACK entry at %d\n", i);
1166 memmove(&c->sacks[i + 1], &c->sacks[i], (NSACKS - i - 1) * sizeof(c->sacks)[i]);
1167 c->sacks[i].offset = offset;
1168 c->sacks[i].len = rxd;
1170 debug(c, "SACK entries full, dropping packet\n");
// New range touches the entry's front: extend the entry leftwards.
1175 debug(c, "merge with start of SACK entry at %d\n", i);
1176 c->sacks[i].offset = offset;
1179 } else if(offset <= c->sacks[i].offset + c->sacks[i].len) {
1180 if(offset + rxd > c->sacks[i].offset + c->sacks[i].len) { // merge
1181 debug(c, "merge with end of SACK entry at %d\n", i);
1182 c->sacks[i].len = offset + rxd - c->sacks[i].offset;
1183 // TODO: handle potential merge with next entry
1190 for(int i = 0; i < NSACKS && c->sacks[i].len; i++) {
1191 debug(c, "SACK[%d] offset %u len %u\n", i, c->sacks[i].offset, c->sacks[i].len);
// Deliver an in-order segment straight to the application, then check
// whether the first SACK range is now contiguous with the delivered data;
// if so, deliver the buffered remainder via buffer_call() and consume the
// covered bytes from the receive buffer / SACK list.
1195 static void handle_in_order(struct utcp_connection *c, const void *data, size_t len) {
1197 ssize_t rxd = c->recv(c, data, len);
1199 if(rxd != (ssize_t)len) {
1200 // TODO: handle the application not accepting all data.
1205 // Check if we can process out-of-order data now.
1206 if(c->sacks[0].len && len >= c->sacks[0].offset) {
1207 debug(c, "incoming packet len %lu connected with SACK at %u\n", (unsigned long)len, c->sacks[0].offset);
1209 if(len < c->sacks[0].offset + c->sacks[0].len) {
// Deliver only the part of the SACK range beyond what just arrived.
1210 size_t offset = len;
1211 len = c->sacks[0].offset + c->sacks[0].len;
1212 size_t remainder = len - offset;
1214 ssize_t rxd = buffer_call(c, &c->rcvbuf, offset, remainder);
1216 if(rxd != (ssize_t)remainder) {
1217 // TODO: handle the application not accepting all data.
1223 if(c->rcvbuf.used) {
1224 sack_consume(c, len);
// Datagram-mode receive path. For unreliable connections hdr->wnd is reused
// as the fragment offset and MF marks "more fragments": unfragmented packets
// are delivered directly; fragments are reassembled in the receive buffer
// (bounded to MAX_UNRELIABLE_SIZE, in-order only) and delivered when the
// final fragment arrives.
1230 static void handle_unreliable(struct utcp_connection *c, const struct hdr *hdr, const void *data, size_t len) {
1231 // Fast path for unfragmented packets
1232 if(!hdr->wnd && !(hdr->ctl & MF)) {
1234 c->recv(c, data, len);
1237 c->rcv.nxt = hdr->seq + len;
1241 // Ensure reassembled packet are not larger than 64 kiB
1242 if(hdr->wnd >= MAX_UNRELIABLE_SIZE || hdr->wnd + len > MAX_UNRELIABLE_SIZE) {
1246 // Don't accept out of order fragments
1247 if(hdr->wnd && hdr->seq != c->rcv.nxt) {
1251 // Reset the receive buffer for the first fragment
1253 buffer_clear(&c->rcvbuf);
1256 ssize_t rxd = buffer_put_at(&c->rcvbuf, hdr->wnd, data, len);
1258 if(rxd != (ssize_t)len) {
1262 // Send the packet if it's the final fragment
1263 if(!(hdr->ctl & MF)) {
1264 buffer_call(c, &c->rcvbuf, 0, hdr->wnd + len);
1267 c->rcv.nxt = hdr->seq + len;
// Dispatch received payload: unreliable connections go through the datagram
// path; reliable connections are split into the in-order fast path
// (offset == 0) and the out-of-order/SACK path based on the distance between
// the segment's seq and rcv.nxt.
1270 static void handle_incoming_data(struct utcp_connection *c, const struct hdr *hdr, const void *data, size_t len) {
1271 if(!is_reliable(c)) {
1272 handle_unreliable(c, hdr, data, len);
1276 uint32_t offset = seqdiff(hdr->seq, c->rcv.nxt);
1279 handle_out_of_order(c, offset, data, len);
1281 handle_in_order(c, data, len);
// Process one incoming datagram for this utcp instance. Broadly follows the
// RFC 793 segment-arrives processing, adapted for utcp's unreliable mode:
// validate and copy the header, parse auxiliary headers, handle new
// connections (SYN -> listen/accept -> SYNACK), then for existing
// connections: acceptability checks, RST handling, ACK/snd.una advancement
// with RTT sampling and RFC 5681 congestion control (slow start, congestion
// avoidance, fast recovery on triple duplicate ACKs), SYN/SYNACK completion,
// data delivery, FIN processing, and finally a reply via ack() or a RST for
// unwanted packets. NOTE(review): this listing is heavily elided — many
// braces, declarations and intermediate statements are not visible here.
1286 ssize_t utcp_recv(struct utcp *utcp, const void *data, size_t len) {
1287 const uint8_t *ptr = data;
1303 // Drop packets smaller than the header
1307 if(len < sizeof(hdr)) {
1308 print_packet(NULL, "recv", data, len);
1313 // Make a copy from the potentially unaligned data to a struct hdr
1315 memcpy(&hdr, ptr, sizeof(hdr));
1317 // Try to match the packet to an existing connection
1319 struct utcp_connection *c = find_connection(utcp, hdr.dst, hdr.src);
1320 print_packet(c, "recv", data, len);
1322 // Process the header
1327 // Drop packets with an unknown CTL flag
1329 if(hdr.ctl & ~(SYN | ACK | RST | FIN | MF)) {
1330 print_packet(NULL, "recv", data, len);
1335 // Check for auxiliary headers
1337 const uint8_t *init = NULL;
1339 uint16_t aux = hdr.aux;
// Aux encoding: high byte carries the length in 4-byte units, low byte the type.
1342 size_t auxlen = 4 * (aux >> 8) & 0xf;
1343 uint8_t auxtype = aux & 0xff;
1352 if(!(hdr.ctl & SYN) || auxlen != 4) {
// Bit 0x800 marks "another auxiliary header follows".
1368 if(!(aux & 0x800)) {
1377 memcpy(&aux, ptr, 2);
1382 bool has_data = len || (hdr.ctl & (SYN | FIN));
1384 // Is it for a new connection?
1387 // Ignore RST packets
1393 // Is it a SYN packet and are we LISTENing?
1395 if(hdr.ctl & SYN && !(hdr.ctl & ACK) && utcp->accept) {
1396 // If we don't want to accept it, send a RST back
1397 if((utcp->listen && !utcp->listen(utcp, hdr.dst))) {
1402 // Try to allocate memory, otherwise send a RST back
1403 c = allocate_connection(utcp, hdr.dst, hdr.src);
1410 // Parse auxiliary information
1417 c->flags = init[3] & 0x7;
1419 c->flags = UTCP_TCP;
1423 // Return SYN+ACK, go to SYN_RECEIVED state
1424 c->snd.wnd = hdr.wnd;
1425 c->rcv.irs = hdr.seq;
1426 c->rcv.nxt = c->rcv.irs + 1;
1427 set_state(c, SYN_RECEIVED);
1434 pkt.hdr.src = c->src;
1435 pkt.hdr.dst = c->dst;
1436 pkt.hdr.ack = c->rcv.irs + 1;
1437 pkt.hdr.seq = c->snd.iss;
1438 pkt.hdr.wnd = c->rcvbuf.maxsize;
1439 pkt.hdr.ctl = SYN | ACK;
1442 pkt.hdr.aux = 0x0101;
1446 pkt.data[3] = c->flags & 0x7;
1447 print_packet(c, "send", &pkt, sizeof(hdr) + 4);
1448 utcp->send(utcp, &pkt, sizeof(hdr) + 4);
1451 print_packet(c, "send", &pkt, sizeof(hdr));
1452 utcp->send(utcp, &pkt, sizeof(hdr));
1455 start_retransmit_timer(c);
1457 // No, we don't want your packets, send a RST back
1465 debug(c, "state %s\n", strstate[c->state]);
1467 // In case this is for a CLOSED connection, ignore the packet.
1468 // TODO: make it so incoming packets can never match a CLOSED connection.
1470 if(c->state == CLOSED) {
1471 debug(c, "got packet for closed connection\n");
1475 // It is for an existing connection.
1477 // 1. Drop invalid packets.
1479 // 1a. Drop packets that should not happen in our current state.
1500 // 1b. Discard data that is not in our receive window.
1502 if(is_reliable(c)) {
// SYN_SENT accepts anything; empty segments need only be at/after rcv.nxt.
1505 if(c->state == SYN_SENT) {
1507 } else if(len == 0) {
1508 acceptable = seqdiff(hdr.seq, c->rcv.nxt) >= 0;
1510 int32_t rcv_offset = seqdiff(hdr.seq, c->rcv.nxt);
1512 // cut already accepted front overlapping
1513 if(rcv_offset < 0) {
1514 acceptable = len > (size_t) - rcv_offset;
1519 hdr.seq -= rcv_offset;
1522 acceptable = seqdiff(hdr.seq, c->rcv.nxt) >= 0 && seqdiff(hdr.seq, c->rcv.nxt) + len <= c->rcvbuf.maxsize;
1527 debug(c, "packet not acceptable, %u <= %u + %lu < %u\n", c->rcv.nxt, hdr.seq, (unsigned long)len, c->rcv.nxt + c->rcvbuf.maxsize);
1529 // Ignore unacceptable RST packets.
1534 // Otherwise, continue processing.
1539 int32_t rcv_offset = seqdiff(hdr.seq, c->rcv.nxt);
1542 debug(c, "packet out of order, offset %u bytes", rcv_offset);
1548 c->snd.wnd = hdr.wnd; // TODO: move below
1550 // 1c. Drop packets with an invalid ACK.
1551 // ackno should not roll back, and it should also not be bigger than what we ever could have sent
1552 // (= snd.una + c->sndbuf.used).
1554 if(!is_reliable(c)) {
// Unreliable mode: ignore bogus acks by treating them as "nothing new acked".
1555 if(hdr.ack != c->snd.last && c->state >= ESTABLISHED) {
1556 hdr.ack = c->snd.una;
1560 // 2. Handle RST packets
1565 if(!(hdr.ctl & ACK)) {
1569 // The peer has refused our connection.
1570 set_state(c, CLOSED);
1571 errno = ECONNREFUSED;
1574 c->recv(c, NULL, 0);
1577 if(c->poll && !c->reapable) {
1588 // We haven't told the application about this connection yet. Silently delete.
1600 // The peer has aborted our connection.
1601 set_state(c, CLOSED);
1603 buffer_clear(&c->sndbuf);
1604 buffer_clear(&c->rcvbuf);
1607 c->recv(c, NULL, 0);
1610 if(c->poll && !c->reapable) {
1623 // As far as the application is concerned, the connection has already been closed.
1624 // If it has called utcp_close() already, we can immediately free this connection.
1630 // Otherwise, immediately move to the CLOSED state.
1631 set_state(c, CLOSED);
1644 if(!(hdr.ctl & ACK)) {
1649 // 3. Advance snd.una
1651 if(seqdiff(hdr.ack, c->snd.last) > 0 || seqdiff(hdr.ack, c->snd.una) < 0) {
1652 debug(c, "packet ack seqno out of range, %u <= %u < %u\n", c->snd.una, hdr.ack, c->snd.una + c->sndbuf.used);
1656 advanced = seqdiff(hdr.ack, c->snd.una);
// An exact match on the awaited ack completes the RTT sample (RFC 6298).
1660 if(c->rtt_start.tv_sec) {
1661 if(c->rtt_seq == hdr.ack) {
1662 struct timespec now;
1663 clock_gettime(UTCP_CLOCK, &now);
1664 int32_t diff = timespec_diff_usec(&now, &c->rtt_start);
1665 update_rtt(c, diff);
1666 c->rtt_start.tv_sec = 0;
1667 } else if(c->rtt_seq < hdr.ack) {
1668 debug(c, "cancelling RTT measurement: %u < %u\n", c->rtt_seq, hdr.ack);
1669 c->rtt_start.tv_sec = 0;
1673 int32_t data_acked = advanced;
1681 // TODO: handle FIN as well.
1686 assert(data_acked >= 0);
1689 int32_t bufused = seqdiff(c->snd.last, c->snd.una);
1690 assert(data_acked <= bufused);
1694 buffer_discard(&c->sndbuf, data_acked);
1696 if(is_reliable(c)) {
1701 // Also advance snd.nxt if possible
1702 if(seqdiff(c->snd.nxt, hdr.ack) < 0) {
1703 c->snd.nxt = hdr.ack;
1706 c->snd.una = hdr.ack;
// New data acked while in fast recovery: deflate cwnd back to ssthresh.
1709 if(c->dupack >= 3) {
1710 debug(c, "fast recovery ended\n");
1711 c->snd.cwnd = c->snd.ssthresh;
1717 // Increase the congestion window according to RFC 5681
1718 if(c->snd.cwnd < c->snd.ssthresh) {
1719 c->snd.cwnd += min(advanced, utcp->mss); // eq. 2
1721 c->snd.cwnd += max(1, (utcp->mss * utcp->mss) / c->snd.cwnd); // eq. 3
1724 if(c->snd.cwnd > c->sndbuf.maxsize) {
1725 c->snd.cwnd = c->sndbuf.maxsize;
1730 // Check if we have sent a FIN that is now ACKed.
1733 if(c->snd.una == c->snd.last) {
1734 set_state(c, FIN_WAIT_2);
1740 if(c->snd.una == c->snd.last) {
1741 clock_gettime(UTCP_CLOCK, &c->conn_timeout);
1742 c->conn_timeout.tv_sec += utcp->timeout;
1743 set_state(c, TIME_WAIT);
// Empty segment not advancing snd.una while data is in flight: duplicate ACK.
1752 if(!len && is_reliable(c) && c->snd.una != c->snd.last) {
1754 debug(c, "duplicate ACK %d\n", c->dupack);
1756 if(c->dupack == 3) {
1757 // RFC 5681 fast recovery
1758 debug(c, "fast recovery started\n", c->dupack);
1759 uint32_t flightsize = seqdiff(c->snd.nxt, c->snd.una);
1760 c->snd.ssthresh = max(flightsize / 2, utcp->mss * 2); // eq. 4
1761 c->snd.cwnd = min(c->snd.ssthresh + 3 * utcp->mss, c->sndbuf.maxsize);
1763 if(c->snd.cwnd > c->sndbuf.maxsize) {
1764 c->snd.cwnd = c->sndbuf.maxsize;
1770 } else if(c->dupack > 3) {
1771 c->snd.cwnd += utcp->mss;
1773 if(c->snd.cwnd > c->sndbuf.maxsize) {
1774 c->snd.cwnd = c->sndbuf.maxsize;
1780 // We got an ACK which indicates the other side did get one of our packets.
1781 // Reset the retransmission timer to avoid going to slow start,
1782 // but don't touch the connection timeout.
1783 start_retransmit_timer(c);
1790 if(c->snd.una == c->snd.last) {
1791 stop_retransmit_timer(c);
1792 timespec_clear(&c->conn_timeout);
1793 } else if(is_reliable(c)) {
1794 start_retransmit_timer(c);
1795 clock_gettime(UTCP_CLOCK, &c->conn_timeout);
1796 c->conn_timeout.tv_sec += utcp->timeout;
1801 // 5. Process SYN stuff
1807 // This is a SYNACK. It should always have ACKed the SYN.
1812 c->rcv.irs = hdr.seq;
1813 c->rcv.nxt = hdr.seq + 1;
1817 set_state(c, FIN_WAIT_1);
1820 set_state(c, ESTABLISHED);
1826 // This is a retransmit of a SYN, send back the SYNACK.
1836 // This could be a retransmission. Ignore the SYN flag, but send an ACK back.
1847 // 6. Process new data
1849 if(c->state == SYN_RECEIVED) {
1850 // This is the ACK after the SYNACK. It should always have ACKed the SYNACK.
1855 // Are we still LISTENing?
1857 utcp->accept(c, c->src);
1860 if(c->state != ESTABLISHED) {
1861 set_state(c, CLOSED);
1871 // This should never happen.
1883 // We already closed the connection and are not interested in more data.
1893 // Ehm no, We should never receive more data after a FIN.
1903 handle_incoming_data(c, &hdr, ptr, len);
1906 // 7. Process FIN stuff
1908 if((hdr.ctl & FIN) && (!is_reliable(c) || hdr.seq + len == c->rcv.nxt)) {
1912 // This should never happen.
1919 set_state(c, CLOSE_WAIT);
1923 set_state(c, CLOSING);
1927 clock_gettime(UTCP_CLOCK, &c->conn_timeout);
1928 c->conn_timeout.tv_sec += utcp->timeout;
1929 set_state(c, TIME_WAIT);
1936 // Ehm, no. We should never receive a second FIN.
1946 // FIN counts as one sequence number
1950 // Inform the application that the peer closed its end of the connection.
1953 c->recv(c, NULL, 0);
1957 // Now we send something back if:
1958 // - we received data, so we have to send back an ACK
1959 // -> sendatleastone = true
1960 // - or we got an ack, so we should maybe send a bit more data
1961 // -> sendatleastone = false
1963 if(is_reliable(c) || hdr.ctl & SYN || hdr.ctl & FIN) {
// Unwanted packet: reflect it as RST|ACK covering everything received.
1978 hdr.ack = hdr.seq + len;
1980 hdr.ctl = RST | ACK;
1983 print_packet(c, "send", &hdr, sizeof(hdr));
1984 utcp->send(utcp, &hdr, sizeof(hdr));
// Shut down reading and/or writing on a connection.
// dir is one of UTCP_SHUT_RD / UTCP_SHUT_WR / UTCP_SHUT_RDWR.
// Reads are simply ignored from now on (TCP has no way to stop the peer
// sending); shutting down writes queues a FIN and arms the retransmit
// timer if it is not already running.
// NOTE(review): this extract is missing interior lines of the function
// (error returns, the FIN queueing itself) — do not treat it as complete.
1989 int utcp_shutdown(struct utcp_connection *c, int dir) {
1990 debug(c, "shutdown %d at %u\n", dir, c ? c->snd.last : 0);
1998 debug(c, "shutdown() called on closed connection\n");
// Reject anything that is not one of the three valid direction values.
2003 if(!(dir == UTCP_SHUT_RD || dir == UTCP_SHUT_WR || dir == UTCP_SHUT_RDWR)) {
2008 // TCP does not have a provision for stopping incoming packets.
2009 // The best we can do is to just ignore them.
2010 if(dir == UTCP_SHUT_RD || dir == UTCP_SHUT_RDWR) {
2014 // The rest of the code deals with shutting down writes.
2015 if(dir == UTCP_SHUT_RD) {
2019 // Only process shutting down writes once.
// State transitions after queueing the FIN — presumably
// ESTABLISHED -> FIN_WAIT_1 and CLOSE_WAIT -> CLOSING (TODO confirm,
// the surrounding switch is missing from this extract).
2037 set_state(c, FIN_WAIT_1);
2045 set_state(c, CLOSING);
// Make sure the FIN will be (re)sent even if no data is in flight.
2058 if(!timespec_isset(&c->rtrx_timeout)) {
2059 start_retransmit_timer(c);
// Forcibly reset a connection: drop all buffered data, move it to CLOSED
// and send a packet to the peer — presumably an RST; the hdr setup lines
// are missing from this extract (TODO confirm against full source).
// Returns false on invalid/closed connections (per the abort() debug path).
2065 static bool reset_connection(struct utcp_connection *c) {
2072 debug(c, "abort() called on closed connection\n");
// Discard everything queued in both directions; the data is lost.
2077 buffer_clear(&c->sndbuf);
2078 buffer_clear(&c->rcvbuf);
2089 set_state(c, CLOSED);
2097 set_state(c, CLOSED);
// Tell the peer: sequence numbers taken from the current send/receive state.
2107 hdr.seq = c->snd.nxt;
2108 hdr.ack = c->rcv.nxt;
2113 print_packet(c, "send", &hdr, sizeof(hdr));
2114 c->utcp->send(c->utcp, &hdr, sizeof(hdr));
// Mark a connection as ready to be reaped by utcp_timeout().
// Buffer storage is handed back to internal management (NULL data) and
// capped at the default maximum sizes, releasing any caller-provided
// storage references. Presumably also sets c->reapable — that line is
// missing from this extract (TODO confirm).
2118 static void set_reapable(struct utcp_connection *c) {
2119 set_buffer_storage(&c->sndbuf, NULL, min(c->sndbuf.maxsize, DEFAULT_MAXSNDBUFSIZE));
2120 set_buffer_storage(&c->rcvbuf, NULL, min(c->rcvbuf.maxsize, DEFAULT_MAXRCVBUFSIZE));
2127 // Resets all connections, but does not invalidate connection handles
// Reset every live connection in this utcp instance. Connection handles
// stay valid for the application; already-reapable or CLOSED connections
// are skipped. Each reset connection gets recv(c, NULL, 0) so the
// application sees EOF, and its poll callback is re-triggered.
2128 void utcp_reset_all_connections(struct utcp *utcp) {
2134 for(int i = 0; i < utcp->nconnections; i++) {
2135 struct utcp_connection *c = utcp->connections[i];
// Nothing to do for connections that are already down.
2137 if(c->reapable || c->state == CLOSED) {
2141 reset_connection(c);
// Zero-length recv signals the application that the connection ended.
2145 c->recv(c, NULL, 0);
2148 if(c->poll && !c->reapable) {
// Gracefully close a connection: full shutdown of both directions, then
// (in lines missing from this extract) presumably mark it reapable.
// NOTE(review): this passes POSIX SHUT_RDWR rather than UTCP_SHUT_RDWR —
// verify the two constants are defined to the same value in utcp.h.
2157 int utcp_close(struct utcp_connection *c) {
2158 if(utcp_shutdown(c, SHUT_RDWR) && errno != ENOTCONN) {
// Abort a connection immediately (RST-style, via reset_connection()).
// Returns non-zero on failure; the success path is missing from this extract.
2166 int utcp_abort(struct utcp_connection *c) {
2167 if(!reset_connection(c)) {
2176 * One call to this function will loop through all connections,
2177 * checking if something needs to be resent or not.
2178 * The return value is the time to the next timeout in milliseconds,
2179 * or maybe a negative value if the timeout is infinite.
// Periodic timer driver: walks every connection once, reaping CLOSED
// connections, firing connection timeouts (drop buffers, signal EOF via
// recv(c, NULL, 0), re-poll), retransmitting on expired rtrx timers, and
// polling connections with free send buffer space. Returns the time until
// the earliest pending timeout, capped at one hour from now.
2181 struct timespec utcp_timeout(struct utcp *utcp) {
2182 struct timespec now;
2183 clock_gettime(UTCP_CLOCK, &now);
// Default "next wakeup" is one hour out; shrunk below by pending timers.
2184 struct timespec next = {now.tv_sec + 3600, now.tv_nsec};
2186 for(int i = 0; i < utcp->nconnections; i++) {
2187 struct utcp_connection *c = utcp->connections[i];
2193 // delete connections that have been utcp_close()d.
2194 if(c->state == CLOSED) {
2196 debug(c, "reaping\n");
// Connection (user) timeout expired: give up on the peer entirely.
2204 if(timespec_isset(&c->conn_timeout) && timespec_lt(&c->conn_timeout, &now)) {
2207 buffer_clear(&c->sndbuf);
2208 buffer_clear(&c->rcvbuf);
2211 c->recv(c, NULL, 0);
2214 if(c->poll && !c->reapable) {
// Retransmission timeout expired: resend outstanding data.
2221 if(timespec_isset(&c->rtrx_timeout) && timespec_lt(&c->rtrx_timeout, &now)) {
2222 debug(c, "retransmitting after timeout\n");
// Application asked to be polled when send buffer space frees up.
2227 if((c->state == ESTABLISHED || c->state == CLOSE_WAIT) && c->do_poll) {
2229 uint32_t len = buffer_free(&c->sndbuf);
2234 } else if(c->state == CLOSED) {
// Track the earliest of all pending timers for the return value.
2239 if(timespec_isset(&c->conn_timeout) && timespec_lt(&c->conn_timeout, &next)) {
2240 next = c->conn_timeout;
2243 if(timespec_isset(&c->rtrx_timeout) && timespec_lt(&c->rtrx_timeout, &next)) {
2244 next = c->rtrx_timeout;
2248 struct timespec diff;
// Convert the absolute deadline into a relative interval for the caller.
2250 timespec_sub(&next, &now, &diff);
// True if any connection is still doing useful work, i.e. in a state
// other than CLOSED or TIME_WAIT.
2255 bool utcp_is_active(struct utcp *utcp) {
2260 for(int i = 0; i < utcp->nconnections; i++)
2261 if(utcp->connections[i]->state != CLOSED && utcp->connections[i]->state != TIME_WAIT) {
// Allocate and initialize a utcp instance. The caller supplies the
// accept/listen callbacks and the send callback used to emit raw packets;
// priv is an opaque pointer stored for the caller (storage line missing
// from this extract). Returns NULL on allocation failure (also missing).
2268 struct utcp *utcp_init(utcp_accept_t accept, utcp_listen_t listen, utcp_send_t send, void *priv) {
2274 struct utcp *utcp = calloc(1, sizeof(*utcp));
2280 utcp_set_mtu(utcp, DEFAULT_MTU);
// Measure clock resolution once, process-wide, for RTT bookkeeping.
2287 if(!CLOCK_GRANULARITY) {
2288 struct timespec res;
2289 clock_getres(UTCP_CLOCK, &res);
2290 CLOCK_GRANULARITY = res.tv_sec * USEC_PER_SEC + res.tv_nsec / 1000;
2293 utcp->accept = accept;
2294 utcp->listen = listen;
2297 utcp->timeout = DEFAULT_USER_TIMEOUT; // sec
// Tear down a utcp instance: for every connection, drop buffered data,
// signal EOF via recv(c, NULL, 0), fire a final poll, free the buffers,
// then free the connection array itself.
2302 void utcp_exit(struct utcp *utcp) {
2307 for(int i = 0; i < utcp->nconnections; i++) {
2308 struct utcp_connection *c = utcp->connections[i];
2311 buffer_clear(&c->sndbuf);
2312 buffer_clear(&c->rcvbuf);
// Let the application know this connection is going away.
2315 c->recv(c, NULL, 0);
2318 if(c->poll && !c->reapable) {
2323 buffer_exit(&c->rcvbuf);
2324 buffer_exit(&c->sndbuf);
2328 free(utcp->connections);
// Current MTU (raw packet size including header), or 0 if utcp is NULL.
2333 uint16_t utcp_get_mtu(struct utcp *utcp) {
2334 return utcp ? utcp->mtu : 0;
// Current MSS (MTU minus header, see utcp_set_mtu), or 0 if utcp is NULL.
2337 uint16_t utcp_get_mss(struct utcp *utcp) {
2338 return utcp ? utcp->mss : 0;
// Set the MTU. Values too small to hold a header are rejected; growing
// the MTU reallocates the shared packet buffer. MSS is derived as
// MTU minus the header size.
2341 void utcp_set_mtu(struct utcp *utcp, uint16_t mtu) {
2346 if(mtu <= sizeof(struct hdr)) {
2350 if(mtu > utcp->mtu) {
2351 char *new = realloc(utcp->pkt, mtu + sizeof(struct hdr));
2361 utcp->mss = mtu - sizeof(struct hdr);
// Make all armed timers fire immediately (retransmit timers set to now,
// connection timeouts pushed to now + user timeout) and reset RTT
// measurement, so traffic resumes promptly, e.g. after network changes.
2364 void utcp_reset_timers(struct utcp *utcp) {
2369 struct timespec now, then;
2371 clock_gettime(UTCP_CLOCK, &now);
2375 then.tv_sec += utcp->timeout;
2377 for(int i = 0; i < utcp->nconnections; i++) {
2378 struct utcp_connection *c = utcp->connections[i];
// Only touch timers that are actually armed.
2384 if(timespec_isset(&c->rtrx_timeout)) {
2385 c->rtrx_timeout = now;
2388 if(timespec_isset(&c->conn_timeout)) {
2389 c->conn_timeout = then;
// Invalidate any in-flight RTT sample and clamp an inflated RTO.
2392 c->rtt_start.tv_sec = 0;
2394 if(c->rto > START_RTO) {
// User (connection) timeout in seconds, or 0 if u is NULL.
2400 int utcp_get_user_timeout(struct utcp *u) {
2401 return u ? u->timeout : 0;
// Set the user (connection) timeout in seconds.
2404 void utcp_set_user_timeout(struct utcp *u, int timeout) {
2406 u->timeout = timeout;
// Maximum send buffer size, or 0 if c is NULL.
2410 size_t utcp_get_sndbuf(struct utcp_connection *c) {
2411 return c ? c->sndbuf.maxsize : 0;
// Free space in the send buffer; guard clauses (missing from this
// extract) presumably return 0 for states that cannot send.
2414 size_t utcp_get_sndbuf_free(struct utcp_connection *c) {
2424 return buffer_free(&c->sndbuf);
// Set (optionally caller-provided) storage for the send buffer, then
// re-arm polling if the connection is reliable and space is available.
2431 void utcp_set_sndbuf(struct utcp_connection *c, void *data, size_t size) {
2436 set_buffer_storage(&c->sndbuf, data, size);
2438 c->do_poll = is_reliable(c) && buffer_free(&c->sndbuf);
// Maximum receive buffer size, or 0 if c is NULL.
2441 size_t utcp_get_rcvbuf(struct utcp_connection *c) {
2442 return c ? c->rcvbuf.maxsize : 0;
// Free space in the receive buffer; only meaningful while the connection
// can still receive data (ESTABLISHED or CLOSE_WAIT), otherwise 0
// (the fallback return is missing from this extract).
2445 size_t utcp_get_rcvbuf_free(struct utcp_connection *c) {
2446 if(c && (c->state == ESTABLISHED || c->state == CLOSE_WAIT)) {
2447 return buffer_free(&c->rcvbuf);
// Set (optionally caller-provided) storage for the receive buffer.
2453 void utcp_set_rcvbuf(struct utcp_connection *c, void *data, size_t size) {
2458 set_buffer_storage(&c->rcvbuf, data, size);
// Bytes currently queued in the send buffer. Note: no NULL check here,
// unlike the other getters — c must be valid.
2461 size_t utcp_get_sendq(struct utcp_connection *c) {
2462 return c->sndbuf.used;
// Bytes currently queued in the receive buffer. Note: no NULL check here,
// unlike the other getters — c must be valid.
2465 size_t utcp_get_recvq(struct utcp_connection *c) {
2466 return c->rcvbuf.used;
// Whether Nagle-style delaying is disabled; false if c is NULL.
2469 bool utcp_get_nodelay(struct utcp_connection *c) {
2470 return c ? c->nodelay : false;
// Enable or disable the nodelay flag on this connection.
2473 void utcp_set_nodelay(struct utcp_connection *c, bool nodelay) {
2475 c->nodelay = nodelay;
// Whether keepalive is enabled; false if c is NULL.
2479 bool utcp_get_keepalive(struct utcp_connection *c) {
2480 return c ? c->keepalive : false;
// Enable or disable keepalive on this connection.
2483 void utcp_set_keepalive(struct utcp_connection *c, bool keepalive) {
2485 c->keepalive = keepalive;
// Bytes sent but not yet ACKed (snd.nxt - snd.una), or 0 if c is NULL.
2489 size_t utcp_get_outq(struct utcp_connection *c) {
2490 return c ? seqdiff(c->snd.nxt, c->snd.una) : 0;
// Install the receive callback for this connection (body missing from
// this extract; presumably just stores recv on c).
2493 void utcp_set_recv_cb(struct utcp_connection *c, utcp_recv_t recv) {
// Install the poll callback; immediately request a poll if the connection
// is reliable and send buffer space is available.
2499 void utcp_set_poll_cb(struct utcp_connection *c, utcp_poll_t poll) {
2502 c->do_poll = is_reliable(c) && buffer_free(&c->sndbuf);
// Replace the accept and listen callbacks on an existing utcp instance.
2506 void utcp_set_accept_cb(struct utcp *utcp, utcp_accept_t accept, utcp_listen_t listen) {
2508 utcp->accept = accept;
2509 utcp->listen = listen;
// Tell utcp whether we expect data from the peer. Expecting data arms the
// connection timeout (if not already armed) so a silent peer is detected;
// cancelling only clears the timer when no unACKed data is outstanding.
// Only valid in ESTABLISHED / FIN_WAIT_1 / FIN_WAIT_2.
2513 void utcp_expect_data(struct utcp_connection *c, bool expect) {
2514 if(!c || c->reapable) {
2518 if(!(c->state == ESTABLISHED || c->state == FIN_WAIT_1 || c->state == FIN_WAIT_2)) {
2523 // If we expect data, start the connection timer.
2524 if(!timespec_isset(&c->conn_timeout)) {
2525 clock_gettime(UTCP_CLOCK, &c->conn_timeout);
2526 c->conn_timeout.tv_sec += c->utcp->timeout;
2529 // If we want to cancel expecting data, only clear the timer when there is no unACKed data.
2530 if(c->snd.una == c->snd.last) {
2531 timespec_clear(&c->conn_timeout);
// Update the connection flags, but only the bits in UTCP_CHANGEABLE_FLAGS;
// all other flag bits are preserved as-is.
2536 void utcp_set_flags(struct utcp_connection *c, uint32_t flags) {
2537 c->flags &= ~UTCP_CHANGEABLE_FLAGS;
2538 c->flags |= flags & UTCP_CHANGEABLE_FLAGS;
// Notify utcp that the underlying transport went offline (true) or came
// back online (false). Each connection's data expectation is flipped
// accordingly; armed retransmit timers are set to fire now, RTT sampling
// is reset and an inflated RTO is clamped back down.
2541 void utcp_offline(struct utcp *utcp, bool offline) {
2542 struct timespec now;
2543 clock_gettime(UTCP_CLOCK, &now);
2545 for(int i = 0; i < utcp->nconnections; i++) {
2546 struct utcp_connection *c = utcp->connections[i];
2552 utcp_expect_data(c, offline);
// Fire the retransmit timer immediately so traffic resumes right away.
2555 if(timespec_isset(&c->rtrx_timeout)) {
2556 c->rtrx_timeout = now;
2559 utcp->connections[i]->rtt_start.tv_sec = 0;
2561 if(c->rto > START_RTO) {
// Install the callback invoked whenever a packet is retransmitted.
2568 void utcp_set_retransmit_cb(struct utcp *utcp, utcp_retransmit_t cb) {
2569 utcp->retransmit = cb;
2572 void utcp_set_clock_granularity(long granularity) {
2573 CLOCK_GRANULARITY = granularity;