return a->tv_sec;
}
-static long CLOCK_GRANULARITY;
+static long CLOCK_GRANULARITY; // usec
static inline size_t min(size_t a, size_t b) {
return a < b ? a : b;
if(!is_reliable(c)) {
c->snd.una = c->snd.nxt = c->snd.last;
buffer_discard(&c->sndbuf, c->sndbuf.used);
+ c->do_poll = true;
}
if(is_reliable(c) && !timespec_isset(&c->rtrx_timeout)) {
if(data_acked) {
buffer_discard(&c->sndbuf, data_acked);
+ c->do_poll = true;
}
// Also advance snd.nxt if possible
}
if(c->poll) {
- if((c->state == ESTABLISHED || c->state == CLOSE_WAIT)) {
- uint32_t len = buffer_free(&c->sndbuf);
+ if((c->state == ESTABLISHED || c->state == CLOSE_WAIT) && c->do_poll) {
+ c->do_poll = false;
+ uint32_t len = buffer_free(&c->sndbuf);
if(len) {
c->poll(c, len);
if(!CLOCK_GRANULARITY) {
struct timespec res;
clock_getres(UTCP_CLOCK, &res);
- CLOCK_GRANULARITY = res.tv_sec * NSEC_PER_SEC + res.tv_nsec;
+ CLOCK_GRANULARITY = res.tv_sec * USEC_PER_SEC + res.tv_nsec / 1000;
}
utcp->accept = accept;
if(c->sndbuf.maxsize != size) {
c->sndbuf.maxsize = -1;
}
+
+ c->do_poll = buffer_free(&c->sndbuf);
}
size_t utcp_get_rcvbuf(struct utcp_connection *c) {
void utcp_set_poll_cb(struct utcp_connection *c, utcp_poll_t poll) {
if(c) {
c->poll = poll;
+ c->do_poll = buffer_free(&c->sndbuf);
}
}
utcp->rto = START_RTO;
}
}
+
+// Manually override the clock granularity (in microseconds) instead of the
+// value otherwise probed lazily via clock_getres(); a non-zero value here
+// suppresses that probe since the probe only runs when CLOCK_GRANULARITY is 0.
+void utcp_set_clock_granularity(long granularity) {
+	CLOCK_GRANULARITY = granularity;
+}