} while (0)
#endif
+static inline size_t max(size_t a, size_t b) {
+ return a > b ? a : b;
+}
+
#ifdef UTCP_DEBUG
#include <stdarg.h>
debug("ACK");
if(len > sizeof hdr) {
- debug(" data=");
- for(int i = sizeof hdr; i < len; i++) {
- const char *data = pkt;
- debug("%c", data[i] >= 32 ? data[i] : '.');
+ uint32_t datalen = len - sizeof hdr;
+ uint8_t *str = malloc((datalen << 1) + 7);
+ if(!str) {
+ debug("out of memory");
+ return;
+ }
+ memcpy(str, " data=", 6);
+ uint8_t *strptr = str + 6;
+ const uint8_t *data = pkt;
+ const uint8_t *dataend = data + datalen;
+
+ while(data != dataend) {
+ *strptr = (*data >> 4) > 9? (*data >> 4) + 55 : (*data >> 4) + 48;
+ ++strptr;
+ *strptr = (*data & 0xf) > 9? (*data & 0xf) + 55 : (*data & 0xf) + 48;
+ ++strptr;
+ ++data;
}
+ *strptr = 0;
+
+ debug("%s", str);
+ free(str);
}
debug("\n");
// TODO: convert to ringbuffers to avoid memmove() operations.
// Store data into the buffer
-static ssize_t buffer_put(struct buffer *buf, const void *data, size_t len) {
+static ssize_t buffer_put_at(struct buffer *buf, size_t offset, const void *data, size_t len) {
if(buf->maxsize <= buf->used)
return 0;
- if(len > buf->maxsize - buf->used)
- len = buf->maxsize - buf->used;
- if(len > buf->size - buf->used) {
+
+ debug("buffer_put_at %zu %zu %zu\n", buf->used, offset, len);
+
+ size_t required = offset + len;
+ if(required > buf->maxsize) {
+ if(offset >= buf->maxsize)
+ return 0;
+ len = buf->maxsize - offset;
+ required = buf->maxsize;
+ }
+
+ if(required > buf->size) {
size_t newsize = buf->size;
- do {
- newsize *= 2;
- } while(newsize < buf->used + len);
+ if(!newsize) {
+ newsize = required;
+ } else {
+ do {
+ newsize *= 2;
+ } while(newsize < required);
+ }
if(newsize > buf->maxsize)
newsize = buf->maxsize;
char *newdata = realloc(buf->data, newsize);
+ if(!newdata)
+ return -1;
buf->data = newdata;
buf->size = newsize;
}
- memcpy(buf->data + buf->used, data, len);
- buf->used += len;
+
+ memcpy(buf->data + offset, data, len);
+ if(required > buf->used)
+ buf->used = required;
return len;
}
+static ssize_t buffer_put(struct buffer *buf, const void *data, size_t len) {
+ return buffer_put_at(buf, buf->used, data, len);
+}
+
// Get data from the buffer. data can be NULL.
static ssize_t buffer_get(struct buffer *buf, void *data, size_t len) {
if(len > buf->used)
static bool buffer_init(struct buffer *buf, uint32_t len, uint32_t maxlen) {
memset(buf, 0, sizeof *buf);
- buf->data = malloc(len);
- if(!len)
- return false;
+ if(len) {
+ buf->data = malloc(len);
+ if(!buf->data)
+ return false;
+ }
buf->size = len;
buf->maxsize = maxlen;
return true;
memmove(cp, cp + 1, (utcp->nconnections - i - 1) * sizeof *cp);
utcp->nconnections--;
+ buffer_exit(&c->rcvbuf);
buffer_exit(&c->sndbuf);
free(c);
}
return NULL;
}
+ if(!buffer_init(&c->rcvbuf, DEFAULT_RCVBUFSIZE, DEFAULT_MAXRCVBUFSIZE)) {
+ buffer_exit(&c->sndbuf);
+ free(c);
+ return NULL;
+ }
+
// Fill in the details
c->src = src;
c->dst = dst;
+#ifdef UTCP_DEBUG
+ c->snd.iss = 0;
+#else
c->snd.iss = rand();
+#endif
c->snd.una = c->snd.iss;
c->snd.nxt = c->snd.iss + 1;
c->rcv.wnd = utcp->mtu;
return c;
}
+// Update RTT variables. See RFC 6298.
+static void update_rtt(struct utcp_connection *c, uint32_t rtt) {
+ if(!rtt) {
+ debug("invalid rtt\n");
+ return;
+ }
+
+ struct utcp *utcp = c->utcp;
+
+ if(!utcp->srtt) {
+ utcp->srtt = rtt;
+ utcp->rttvar = rtt / 2;
+ utcp->rto = rtt + max(2 * rtt, CLOCK_GRANULARITY);
+ } else {
+ utcp->rttvar = (utcp->rttvar * 3 + (utcp->srtt > rtt ? utcp->srtt - rtt : rtt - utcp->srtt)) / 4;
+ utcp->srtt = (utcp->srtt * 7 + rtt) / 8;
+ utcp->rto = utcp->srtt + max(utcp->rttvar, CLOCK_GRANULARITY);
+ }
+
+ if(utcp->rto > MAX_RTO)
+ utcp->rto = MAX_RTO;
+
+ debug("rtt %u srtt %u rttvar %u rto %u\n", rtt, utcp->srtt, utcp->rttvar, utcp->rto);
+}
+
+static void start_retransmit_timer(struct utcp_connection *c) {
+ gettimeofday(&c->rtrx_timeout, NULL);
+ c->rtrx_timeout.tv_usec += c->utcp->rto;
+ while(c->rtrx_timeout.tv_usec >= 1000000) {
+ c->rtrx_timeout.tv_usec -= 1000000;
+ c->rtrx_timeout.tv_sec++;
+ }
+ debug("timeout set to %lu.%06lu (%u)\n", c->rtrx_timeout.tv_sec, c->rtrx_timeout.tv_usec, c->utcp->rto);
+}
+
+static void stop_retransmit_timer(struct utcp_connection *c) {
+ timerclear(&c->rtrx_timeout);
+ debug("timeout cleared\n");
+}
+
struct utcp_connection *utcp_connect(struct utcp *utcp, uint16_t dst, utcp_recv_t recv, void *priv) {
struct utcp_connection *c = allocate_connection(utcp, 0, dst);
if(!c)
static void ack(struct utcp_connection *c, bool sendatleastone) {
int32_t left = seqdiff(c->snd.last, c->snd.nxt);
int32_t cwndleft = c->snd.cwnd - seqdiff(c->snd.nxt, c->snd.una);
+ debug("cwndleft = %d\n", cwndleft);
assert(left >= 0);
} *pkt;
pkt = malloc(sizeof pkt->hdr + c->utcp->mtu);
- if(!pkt->data)
+ if(!pkt)
return;
pkt->hdr.src = c->src;
pkt->hdr.ctl |= FIN;
}
+ if(!c->rtt_start.tv_sec) {
+ // Start RTT measurement
+ gettimeofday(&c->rtt_start, NULL);
+ c->rtt_seq = pkt->hdr.seq + seglen;
+ debug("Starting RTT measurement, expecting ack %u\n", c->rtt_seq);
+ }
+
print_packet(c->utcp, "send", pkt, sizeof pkt->hdr + seglen);
c->utcp->send(c->utcp, pkt, sizeof pkt->hdr + seglen);
} while(left);
c->snd.last += len;
ack(c, false);
+ if(!timerisset(&c->rtrx_timeout))
+ start_retransmit_timer(c);
return len;
}
}
static void retransmit(struct utcp_connection *c) {
- if(c->state == CLOSED || c->snd.nxt == c->snd.una)
+ if(c->state == CLOSED || c->snd.last == c->snd.una) {
+ debug("Retransmit() called but nothing to retransmit!\n");
+ stop_retransmit_timer(c);
return;
+ }
struct utcp *utcp = c->utcp;
pkt->hdr.src = c->src;
pkt->hdr.dst = c->dst;
+ pkt->hdr.wnd = c->rcv.wnd;
+ pkt->hdr.aux = 0;
switch(c->state) {
case SYN_SENT:
// Send our SYN again
pkt->hdr.seq = c->snd.iss;
pkt->hdr.ack = 0;
- pkt->hdr.wnd = c->rcv.wnd;
pkt->hdr.ctl = SYN;
print_packet(c->utcp, "rtrx", pkt, sizeof pkt->hdr);
utcp->send(utcp, pkt, sizeof pkt->hdr);
len--;
pkt->hdr.ctl |= FIN;
}
+ c->snd.nxt = c->snd.una + len;
+ c->snd.cwnd = utcp->mtu; // reduce cwnd on retransmit
buffer_copy(&c->sndbuf, pkt->data, 0, len);
print_packet(c->utcp, "rtrx", pkt, sizeof pkt->hdr + len);
utcp->send(utcp, pkt, sizeof pkt->hdr + len);
#ifdef UTCP_DEBUG
abort();
#endif
- timerclear(&c->rtrx_timeout);
- break;
+ stop_retransmit_timer(c);
+ goto cleanup;
}
+ start_retransmit_timer(c);
+ utcp->rto *= 2;
+ if(utcp->rto > MAX_RTO)
+ utcp->rto = MAX_RTO;
+ c->rtt_start.tv_sec = 0; // invalidate RTT timer
+
+cleanup:
free(pkt);
}
+/* Update receive buffer and SACK entries after consuming data.
+ *
+ * Situation:
+ *
+ * |.....0000..1111111111.....22222......3333|
+ * |---------------^
+ *
+ * 0..3 represent the SACK entries. The ^ indicates up to which point we want
+ * to remove data from the receive buffer. The idea is to substract "len"
+ * from the offset of all the SACK entries, and then remove/cut down entries
+ * that are shifted to before the start of the receive buffer.
+ *
+ * There are three cases:
+ * - the SACK entry is ahead of ^, in that case just change the offset.
+ * - the SACK entry starts before and ends after ^, so we have to
+ * change both its offset and size.
+ * - the SACK entry is completely before ^, in that case delete it.
+ */
+static void sack_consume(struct utcp_connection *c, size_t len) {
+ debug("sack_consume %zu\n", len);
+ if(len > c->rcvbuf.used)
+ abort();
+
+ buffer_get(&c->rcvbuf, NULL, len);
+
+ for(int i = 0; i < NSACKS && c->sacks[i].len; ) {
+ if(len < c->sacks[i].offset) {
+ c->sacks[i].offset -= len;
+ i++;
+ } else if(len < c->sacks[i].offset + c->sacks[i].len) {
+ c->sacks[i].len -= len - c->sacks[i].offset;
+ c->sacks[i].offset = 0;
+ i++;
+ } else {
+ if(i < NSACKS - 1) {
+ memmove(&c->sacks[i], &c->sacks[i + 1], (NSACKS - 1 - i) * sizeof c->sacks[i]);
+ c->sacks[NSACKS - 1].len = 0;
+ } else {
+ c->sacks[i].len = 0;
+ break;
+ }
+ }
+ }
+
+ for(int i = 0; i < NSACKS && c->sacks[i].len; i++)
+ debug("SACK[%d] offset %u len %u\n", i, c->sacks[i].offset, c->sacks[i].len);
+}
+
+static void handle_out_of_order(struct utcp_connection *c, uint32_t offset, const void *data, size_t len) {
+ debug("out of order packet, offset %u\n", offset);
+ // Packet loss or reordering occured. Store the data in the buffer.
+ ssize_t rxd = buffer_put_at(&c->rcvbuf, offset, data, len);
+ if(rxd < 0 || (size_t)rxd < len)
+ abort();
+
+ // Make note of where we put it.
+ for(int i = 0; i < NSACKS; i++) {
+ if(!c->sacks[i].len) { // nothing to merge, add new entry
+ debug("New SACK entry %d\n", i);
+ c->sacks[i].offset = offset;
+ c->sacks[i].len = rxd;
+ break;
+ } else if(offset < c->sacks[i].offset) {
+ if(offset + rxd < c->sacks[i].offset) { // insert before
+ if(!c->sacks[NSACKS - 1].len) { // only if room left
+ debug("Insert SACK entry at %d\n", i);
+ memmove(&c->sacks[i + 1], &c->sacks[i], (NSACKS - i - 1) * sizeof c->sacks[i]);
+ c->sacks[i].offset = offset;
+ c->sacks[i].len = rxd;
+ }
+ break;
+ } else { // merge
+ debug("Merge with start of SACK entry at %d\n", i);
+ c->sacks[i].offset = offset;
+ break;
+ }
+ } else if(offset <= c->sacks[i].offset + c->sacks[i].len) {
+ if(offset + rxd > c->sacks[i].offset + c->sacks[i].len) { // merge
+ debug("Merge with end of SACK entry at %d\n", i);
+ c->sacks[i].len = offset + rxd - c->sacks[i].offset;
+ // TODO: handle potential merge with next entry
+ }
+ break;
+ }
+ }
+
+ for(int i = 0; i < NSACKS && c->sacks[i].len; i++)
+ debug("SACK[%d] offset %u len %u\n", i, c->sacks[i].offset, c->sacks[i].len);
+}
+
+static void handle_in_order(struct utcp_connection *c, const void *data, size_t len) {
+ // Check if we can process out-of-order data now.
+ if(c->sacks[0].len && len >= c->sacks[0].offset) { // TODO: handle overlap with second SACK
+ debug("incoming packet len %zu connected with SACK at %u\n", len, c->sacks[0].offset);
+ buffer_put_at(&c->rcvbuf, 0, data, len); // TODO: handle return value
+ len = max(len, c->sacks[0].offset + c->sacks[0].len);
+ data = c->rcvbuf.data;
+ }
+
+ if(c->recv) {
+ ssize_t rxd = c->recv(c, data, len);
+ if(rxd != len) {
+ // TODO: handle the application not accepting all data.
+ abort();
+ }
+ }
+
+ if(c->rcvbuf.used)
+ sack_consume(c, len);
+
+ c->rcv.nxt += len;
+}
+
+
+static void handle_incoming_data(struct utcp_connection *c, uint32_t seq, const void *data, size_t len) {
+ uint32_t offset = seqdiff(seq, c->rcv.nxt);
+ if(offset + len > c->rcvbuf.maxsize)
+ abort();
+
+ if(offset)
+ handle_out_of_order(c, offset, data, len);
+ else
+ handle_in_order(c, data, len);
+}
+
ssize_t utcp_recv(struct utcp *utcp, const void *data, size_t len) {
if(!utcp) {
if(c->state == SYN_SENT)
acceptable = true;
-
- // TODO: handle packets overlapping c->rcv.nxt.
-#if 0
- // Only use this when accepting out-of-order packets.
else if(len == 0)
- if(c->rcv.wnd == 0)
- acceptable = hdr.seq == c->rcv.nxt;
- else
- acceptable = (seqdiff(hdr.seq, c->rcv.nxt) >= 0 && seqdiff(hdr.seq, c->rcv.nxt + c->rcv.wnd) < 0);
- else
- if(c->rcv.wnd == 0)
- // We don't accept data when the receive window is zero.
- acceptable = false;
- else
- // Both start and end of packet must be within the receive window
- acceptable = (seqdiff(hdr.seq, c->rcv.nxt) >= 0 && seqdiff(hdr.seq, c->rcv.nxt + c->rcv.wnd) < 0)
- || (seqdiff(hdr.seq + len + 1, c->rcv.nxt) >= 0 && seqdiff(hdr.seq + len - 1, c->rcv.nxt + c->rcv.wnd) < 0);
-#else
- if(c->state != SYN_SENT)
- acceptable = hdr.seq == c->rcv.nxt;
-#endif
+ acceptable = seqdiff(hdr.seq, c->rcv.nxt) >= 0;
+ else {
+ int32_t rcv_offset = seqdiff(hdr.seq, c->rcv.nxt);
+
+ // cut already accepted front overlapping
+ if(rcv_offset < 0) {
+ acceptable = len > (size_t)-rcv_offset;
+ if(acceptable) {
+ data -= rcv_offset;
+ len += rcv_offset;
+ hdr.seq -= rcv_offset;
+ }
+ }
+
+ acceptable = seqdiff(hdr.seq, c->rcv.nxt) >= 0 && seqdiff(hdr.seq, c->rcv.nxt) + len <= c->rcvbuf.maxsize;
+ }
if(!acceptable) {
- debug("Packet not acceptable, %u <= %u + %zu < %u\n", c->rcv.nxt, hdr.seq, len, c->rcv.nxt + c->rcv.wnd);
+ debug("Packet not acceptable, %u <= %u + %zu < %u\n", c->rcv.nxt, hdr.seq, len, c->rcv.nxt + c->rcvbuf.maxsize);
// Ignore unacceptable RST packets.
if(hdr.ctl & RST)
return 0;
c->snd.wnd = hdr.wnd; // TODO: move below
// 1c. Drop packets with an invalid ACK.
- // ackno should not roll back, and it should also not be bigger than snd.nxt.
+ // ackno should not roll back, and it should also not be bigger than what we ever could have sent
+ // (= snd.una + c->sndbuf.used).
- if(hdr.ctl & ACK && (seqdiff(hdr.ack, c->snd.nxt) > 0 || seqdiff(hdr.ack, c->snd.una) < 0)) {
- debug("Packet ack seqno out of range, %u %u %u\n", hdr.ack, c->snd.una, c->snd.nxt);
+ if(hdr.ctl & ACK && (seqdiff(hdr.ack, c->snd.last) > 0 || seqdiff(hdr.ack, c->snd.una) < 0)) {
+ debug("Packet ack seqno out of range, %u <= %u < %u\n", c->snd.una, hdr.ack, c->snd.una + c->sndbuf.used);
// Ignore unacceptable RST packets.
if(hdr.ctl & RST)
return 0;
prevrcvnxt = c->rcv.nxt;
if(advanced) {
+ // RTT measurement
+ if(c->rtt_start.tv_sec) {
+ if(c->rtt_seq == hdr.ack) {
+ struct timeval now, diff;
+ gettimeofday(&now, NULL);
+ timersub(&now, &c->rtt_start, &diff);
+ update_rtt(c, diff.tv_sec * 1000000 + diff.tv_usec);
+ c->rtt_start.tv_sec = 0;
+ } else if(seqdiff(c->rtt_seq, hdr.ack) < 0) {
+ debug("Cancelling RTT measurement: %u < %u\n", c->rtt_seq, hdr.ack);
+ c->rtt_start.tv_sec = 0;
+ }
+ }
+
int32_t data_acked = advanced;
switch(c->state) {
if(data_acked)
buffer_get(&c->sndbuf, NULL, data_acked);
+ // Also advance snd.nxt if possible
+ if(seqdiff(c->snd.nxt, hdr.ack) < 0)
+ c->snd.nxt = hdr.ack;
+
c->snd.una = hdr.ack;
c->dupack = 0;
debug("Triplicate ACK\n");
//TODO: Resend one packet and go to fast recovery mode. See RFC 6582.
//We do a very simple variant here; reset the nxt pointer to the last acknowledged packet from the peer.
- //This will cause us to start retransmitting, but at the same speed as the incoming ACKs arrive,
- //thus preventing a drop in speed.
+ //Reset the congestion window so we wait for ACKs.
c->snd.nxt = c->snd.una;
+ c->snd.cwnd = utcp->mtu;
+ start_retransmit_timer(c);
}
}
}
if(advanced) {
timerclear(&c->conn_timeout); // It will be set anew in utcp_timeout() if c->snd.una != c->snd.nxt.
- if(c->snd.una == c->snd.nxt)
- timerclear(&c->rtrx_timeout);
+ if(c->snd.una == c->snd.last)
+ stop_retransmit_timer(c);
+ else
+ start_retransmit_timer(c);
}
// 5. Process SYN stuff
return 0;
}
- ssize_t rxd;
-
- if(c->recv) {
- rxd = c->recv(c, data, len);
- if(rxd != len) {
- // TODO: once we have a receive buffer, handle the application not accepting all data.
- abort();
- }
- if(rxd < 0)
- rxd = 0;
- else if(rxd > len)
- rxd = len; // Bad application, bad!
- } else {
- rxd = len;
- }
-
- c->rcv.nxt += len;
+ handle_incoming_data(c, hdr.seq, data, len);
}
// 7. Process FIN stuff
- if(hdr.ctl & FIN) {
+ if((hdr.ctl & FIN) && hdr.seq + len == c->rcv.nxt) {
switch(c->state) {
case SYN_SENT:
case SYN_RECEIVED:
// - or we got an ack, so we should maybe send a bit more data
// -> sendatleastone = false
-ack:
- ack(c, prevrcvnxt != c->rcv.nxt);
+ ack(c, len || prevrcvnxt != c->rcv.nxt);
return 0;
reset:
}
int utcp_shutdown(struct utcp_connection *c, int dir) {
- debug("%p shutdown %d at %u\n", c ? c->utcp : NULL, dir, c->snd.last);
+ debug("%p shutdown %d at %u\n", c ? c->utcp : NULL, dir, c ? c->snd.last : 0);
if(!c) {
errno = EFAULT;
return -1;
c->snd.last++;
ack(c, false);
+ if(!timerisset(&c->rtrx_timeout))
+ start_retransmit_timer(c);
return 0;
}
if(!c)
continue;
+ // delete connections that have been utcp_close()d.
if(c->state == CLOSED) {
if(c->reapable) {
debug("Reaping %p\n", c);
}
if(timerisset(&c->rtrx_timeout) && timercmp(&c->rtrx_timeout, &now, <)) {
+ debug("retransmit()\n");
retransmit(c);
}
if(timerisset(&c->conn_timeout) && timercmp(&c->conn_timeout, &next, <))
next = c->conn_timeout;
- if(c->snd.nxt != c->snd.una) {
- c->rtrx_timeout = now;
- c->rtrx_timeout.tv_sec++;
- } else {
- timerclear(&c->rtrx_timeout);
- }
-
if(timerisset(&c->rtrx_timeout) && timercmp(&c->rtrx_timeout, &next, <))
next = c->rtrx_timeout;
}
}
struct utcp *utcp_init(utcp_accept_t accept, utcp_pre_accept_t pre_accept, utcp_send_t send, void *priv) {
- struct utcp *utcp = calloc(1, sizeof *utcp);
- if(!utcp)
- return NULL;
-
if(!send) {
errno = EFAULT;
return NULL;
}
+ struct utcp *utcp = calloc(1, sizeof *utcp);
+ if(!utcp)
+ return NULL;
+
utcp->accept = accept;
utcp->pre_accept = pre_accept;
utcp->send = send;
utcp->priv = priv;
- utcp->mtu = 1000;
- utcp->timeout = 60;
+ utcp->mtu = DEFAULT_MTU;
+ utcp->timeout = DEFAULT_USER_TIMEOUT; // s
+ utcp->rto = START_RTO; // us
return utcp;
}
for(int i = 0; i < utcp->nconnections; i++) {
if(!utcp->connections[i]->reapable)
debug("Warning, freeing unclosed connection %p\n", utcp->connections[i]);
+ buffer_exit(&utcp->connections[i]->rcvbuf);
buffer_exit(&utcp->connections[i]->sndbuf);
free(utcp->connections[i]);
}
c->sndbuf.maxsize = -1;
}
+size_t utcp_get_rcvbuf(struct utcp_connection *c) {
+ return c ? c->rcvbuf.maxsize : 0;
+}
+
+size_t utcp_get_rcvbuf_free(struct utcp_connection *c) {
+ if(c && (c->state == ESTABLISHED || c->state == CLOSE_WAIT))
+ return buffer_free(&c->rcvbuf);
+ else
+ return 0;
+}
+
+void utcp_set_rcvbuf(struct utcp_connection *c, size_t size) {
+ if(!c)
+ return;
+ c->rcvbuf.maxsize = size;
+ if(c->rcvbuf.maxsize != size)
+ c->rcvbuf.maxsize = -1;
+}
+
bool utcp_get_nodelay(struct utcp_connection *c) {
return c ? c->nodelay : false;
}