// Reset a timespec to zero (the epoch).
// Note: the original hunk carried a stray diff '+' marker on the
// tv_nsec line, which made the file un-compilable; resolved here.
static void timespec_clear(struct timespec *a) {
	a->tv_sec = 0;
	a->tv_nsec = 0;
}
// Return true if the timespec holds a nonzero (set) time.
// The body was missing: falling off the end of a non-void function and
// using its value is undefined behavior, so this was a real bug (or
// diff-elided code). Restored to the canonical "either field nonzero"
// check implied by the name and by the callers (e.g. the
// !timespec_isset(&c->rtrx_timeout) guard).
static bool timespec_isset(const struct timespec *a) {
	return a->tv_sec != 0 || a->tv_nsec != 0;
}
static uint32_t buffer_free(const struct buffer *buf) {
- return buf->maxsize - buf->used;
+ return buf->maxsize > buf->used ? buf->maxsize - buf->used : 0;
}
// Connections are stored in a sorted list.
rto -= USEC_PER_SEC;
}
- c->rtrx_timeout.tv_nsec += c->rto * 1000;
+ c->rtrx_timeout.tv_nsec += rto * 1000;
if(c->rtrx_timeout.tv_nsec >= NSEC_PER_SEC) {
c->rtrx_timeout.tv_nsec -= NSEC_PER_SEC;
if(!is_reliable(c)) {
c->snd.una = c->snd.nxt = c->snd.last;
buffer_discard(&c->sndbuf, c->sndbuf.used);
- c->do_poll = true;
}
if(is_reliable(c) && !timespec_isset(&c->rtrx_timeout)) {
// Packet loss or reordering occurred. Store the data in the buffer.
ssize_t rxd = buffer_put_at(&c->rcvbuf, offset, data, len);
- if(rxd < 0 || (size_t)rxd < len) {
- abort();
+ if(rxd <= 0) {
+ debug(c, "packet outside receive buffer, dropping\n");
+ return;
+ }
+
+ if((size_t)rxd < len) {
+ debug(c, "packet partially outside receive buffer\n");
+ len = rxd;
}
// Make note of where we put it.
size_t offset = len;
len = c->sacks[0].offset + c->sacks[0].len;
size_t remainder = len - offset;
- ssize_t rxd = buffer_call(&c->rcvbuf, c->recv, c, offset, remainder);
- if(rxd != (ssize_t)remainder) {
- // TODO: handle the application not accepting all data.
- abort();
+ if(c->recv) {
+ ssize_t rxd = buffer_call(&c->rcvbuf, c->recv, c, offset, remainder);
+
+ if(rxd != (ssize_t)remainder) {
+ // TODO: handle the application not accepting all data.
+ abort();
+ }
}
}
}
static void handle_unreliable(struct utcp_connection *c, const struct hdr *hdr, const void *data, size_t len) {
// Fast path for unfragmented packets
if(!hdr->wnd && !(hdr->ctl & MF)) {
- c->recv(c, data, len);
+ if(c->recv) {
+ c->recv(c, data, len);
+ }
+
c->rcv.nxt = hdr->seq + len;
return;
}
}
// Send the packet if it's the final fragment
- if(!(hdr->ctl & MF)) {
+ if(!(hdr->ctl & MF) && c->recv) {
buffer_call(&c->rcvbuf, c->recv, c, 0, hdr->wnd + len);
}
uint32_t offset = seqdiff(hdr->seq, c->rcv.nxt);
- if(offset + len > c->rcvbuf.maxsize) {
- abort();
- }
-
if(offset) {
handle_out_of_order(c, offset, data, len);
} else {
if(data_acked) {
buffer_discard(&c->sndbuf, data_acked);
- c->do_poll = true;
+
+ if(is_reliable(c)) {
+ c->do_poll = true;
+ }
}
// Also advance snd.nxt if possible
c->sndbuf.maxsize = -1;
}
- c->do_poll = buffer_free(&c->sndbuf);
+ c->do_poll = is_reliable(c) && buffer_free(&c->sndbuf);
}
size_t utcp_get_rcvbuf(struct utcp_connection *c) {
// Install the application's poll callback on a connection.
// do_poll is armed only for reliable connections with free space in
// the send buffer, matching the same gating applied elsewhere in this
// patch (unreliable connections do not get buffer-space polls).
// Resolved stray diff markers to the '+' version of the hunk.
// NULL connections are ignored.
void utcp_set_poll_cb(struct utcp_connection *c, utcp_poll_t poll) {
	if(c) {
		c->poll = poll;
		c->do_poll = is_reliable(c) && buffer_free(&c->sndbuf);
	}
}