}
static int32_t timespec_diff_usec(const struct timespec *a, const struct timespec *b) {
- int64_t diff = (a->tv_sec - b->tv_sec) * 1000000000 + a->tv_sec - b->tv_sec;
- return diff / 1000;
+ return (a->tv_sec - b->tv_sec) * 1000000 + (a->tv_nsec - b->tv_nsec) / 1000;
}
static bool timespec_lt(const struct timespec *a, const struct timespec *b) {
static void timespec_clear(struct timespec *a) {
a->tv_sec = 0;
+ a->tv_nsec = 0;
}
static bool timespec_isset(const struct timespec *a) {
}
static uint32_t buffer_free(const struct buffer *buf) {
- return buf->maxsize - buf->used;
+ return buf->maxsize > buf->used ? buf->maxsize - buf->used : 0;
}
// Connections are stored in a sorted list.
c->snd.cwnd = (utcp->mss > 2190 ? 2 : utcp->mss > 1095 ? 3 : 4) * utcp->mss;
c->snd.ssthresh = ~0;
debug_cwnd(c);
+ c->srtt = 0;
+ c->rttvar = 0;
+ c->rto = START_RTO;
c->utcp = utcp;
// Add it to the sorted list of connections
return;
}
- struct utcp *utcp = c->utcp;
-
- if(!utcp->srtt) {
- utcp->srtt = rtt;
- utcp->rttvar = rtt / 2;
+ if(!c->srtt) {
+ c->srtt = rtt;
+ c->rttvar = rtt / 2;
} else {
- utcp->rttvar = (utcp->rttvar * 3 + absdiff(utcp->srtt, rtt)) / 4;
- utcp->srtt = (utcp->srtt * 7 + rtt) / 8;
+ c->rttvar = (c->rttvar * 3 + absdiff(c->srtt, rtt)) / 4;
+ c->srtt = (c->srtt * 7 + rtt) / 8;
}
- utcp->rto = utcp->srtt + max(4 * utcp->rttvar, CLOCK_GRANULARITY);
+ c->rto = c->srtt + max(4 * c->rttvar, CLOCK_GRANULARITY);
- if(utcp->rto > MAX_RTO) {
- utcp->rto = MAX_RTO;
+ if(c->rto > MAX_RTO) {
+ c->rto = MAX_RTO;
}
- debug(c, "rtt %u srtt %u rttvar %u rto %u\n", rtt, utcp->srtt, utcp->rttvar, utcp->rto);
+ debug(c, "rtt %u srtt %u rttvar %u rto %u\n", rtt, c->srtt, c->rttvar, c->rto);
}
static void start_retransmit_timer(struct utcp_connection *c) {
clock_gettime(UTCP_CLOCK, &c->rtrx_timeout);
- uint32_t rto = c->utcp->rto;
+ uint32_t rto = c->rto;
while(rto > USEC_PER_SEC) {
c->rtrx_timeout.tv_sec++;
rto -= USEC_PER_SEC;
}
- c->rtrx_timeout.tv_nsec += c->utcp->rto * 1000;
+ c->rtrx_timeout.tv_nsec += rto * 1000;
if(c->rtrx_timeout.tv_nsec >= NSEC_PER_SEC) {
c->rtrx_timeout.tv_nsec -= NSEC_PER_SEC;
if(!is_reliable(c)) {
c->snd.una = c->snd.nxt = c->snd.last;
buffer_discard(&c->sndbuf, c->sndbuf.used);
- c->do_poll = true;
}
if(is_reliable(c) && !timespec_isset(&c->rtrx_timeout)) {
}
start_retransmit_timer(c);
- utcp->rto *= 2;
+ c->rto *= 2;
- if(utcp->rto > MAX_RTO) {
- utcp->rto = MAX_RTO;
+ if(c->rto > MAX_RTO) {
+ c->rto = MAX_RTO;
}
c->rtt_start.tv_sec = 0; // invalidate RTT timer
// Packet loss or reordering occurred. Store the data in the buffer.
ssize_t rxd = buffer_put_at(&c->rcvbuf, offset, data, len);
- if(rxd < 0 || (size_t)rxd < len) {
- abort();
+ if(rxd <= 0) {
+ debug(c, "packet outside receive buffer, dropping\n");
+ return;
+ }
+
+ if((size_t)rxd < len) {
+ debug(c, "packet partially outside receive buffer\n");
+ len = rxd;
}
// Make note of where we put it.
size_t offset = len;
len = c->sacks[0].offset + c->sacks[0].len;
size_t remainder = len - offset;
- ssize_t rxd = buffer_call(&c->rcvbuf, c->recv, c, offset, remainder);
- if(rxd != (ssize_t)remainder) {
- // TODO: handle the application not accepting all data.
- abort();
+ if(c->recv) {
+ ssize_t rxd = buffer_call(&c->rcvbuf, c->recv, c, offset, remainder);
+
+ if(rxd != (ssize_t)remainder) {
+ // TODO: handle the application not accepting all data.
+ abort();
+ }
}
}
}
static void handle_unreliable(struct utcp_connection *c, const struct hdr *hdr, const void *data, size_t len) {
// Fast path for unfragmented packets
if(!hdr->wnd && !(hdr->ctl & MF)) {
- c->recv(c, data, len);
+ if(c->recv) {
+ c->recv(c, data, len);
+ }
+
c->rcv.nxt = hdr->seq + len;
return;
}
}
// Send the packet if it's the final fragment
- if(!(hdr->ctl & MF)) {
+ if(!(hdr->ctl & MF) && c->recv) {
buffer_call(&c->rcvbuf, c->recv, c, 0, hdr->wnd + len);
}
uint32_t offset = seqdiff(hdr->seq, c->rcv.nxt);
- if(offset + len > c->rcvbuf.maxsize) {
- abort();
- }
-
if(offset) {
handle_out_of_order(c, offset, data, len);
} else {
if(data_acked) {
buffer_discard(&c->sndbuf, data_acked);
- c->do_poll = true;
+
+ if(is_reliable(c)) {
+ c->do_poll = true;
+ }
}
// Also advance snd.nxt if possible
utcp->priv = priv;
utcp_set_mtu(utcp, DEFAULT_MTU);
utcp->timeout = DEFAULT_USER_TIMEOUT; // sec
- utcp->rto = START_RTO; // usec
return utcp;
}
}
c->rtt_start.tv_sec = 0;
- }
- if(utcp->rto > START_RTO) {
- utcp->rto = START_RTO;
+ if(c->rto > START_RTO) {
+ c->rto = START_RTO;
+ }
}
}
c->sndbuf.maxsize = -1;
}
- c->do_poll = buffer_free(&c->sndbuf);
+ c->do_poll = is_reliable(c) && buffer_free(&c->sndbuf);
}
size_t utcp_get_rcvbuf(struct utcp_connection *c) {
void utcp_set_poll_cb(struct utcp_connection *c, utcp_poll_t poll) {
if(c) {
c->poll = poll;
- c->do_poll = buffer_free(&c->sndbuf);
+ c->do_poll = is_reliable(c) && buffer_free(&c->sndbuf);
}
}
}
utcp->connections[i]->rtt_start.tv_sec = 0;
- }
- }
- if(!offline && utcp->rto > START_RTO) {
- utcp->rto = START_RTO;
+ if(c->rto > START_RTO) {
+ c->rto = START_RTO;
+ }
+ }
}
}