#undef poll
#endif
+#ifndef UTCP_CLOCK
+// Clock source for all internal timestamps. CLOCK_MONOTONIC_RAW is immune to
+// NTP rate adjustment (slewing), which matters for RTT measurement.
+// NOTE(review): it is gated to __x86_64__ — presumably because the raw clock
+// is only known to be cheap/reliable there; confirm before widening.
+#if defined(CLOCK_MONOTONIC_RAW) && defined(__x86_64__)
+#define UTCP_CLOCK CLOCK_MONOTONIC_RAW
+#else
+#define UTCP_CLOCK CLOCK_MONOTONIC
+#endif
+#endif
+
static void timespec_sub(const struct timespec *a, const struct timespec *b, struct timespec *r) {
r->tv_sec = a->tv_sec - b->tv_sec;
r->tv_nsec = a->tv_nsec - b->tv_nsec;
#define UTCP_DEBUG_DATALEN 20
#endif
-#ifndef UTCP_CLOCK
-#if defined(CLOCK_MONOTONIC_RAW) && defined(__x86_64__)
-#define UTCP_CLOCK CLOCK_MONOTONIC_RAW
-#else
-#define UTCP_CLOCK CLOCK_MONOTONIC
-#endif
-#endif
-
static void debug(struct utcp_connection *c, const char *format, ...) {
struct timespec tv;
char buf[1024];
// [345.........|........012]
uint32_t tailsize = buf->size - buf->offset;
uint32_t newoffset = newsize - tailsize;
- memmove(buf + newoffset, buf + buf->offset, tailsize);
+ memmove(buf->data + newoffset, buf->data + buf->offset, tailsize);
buf->offset = newoffset;
}
if(!is_reliable(c)) {
c->snd.una = c->snd.nxt = c->snd.last;
buffer_discard(&c->sndbuf, c->sndbuf.used);
+ c->do_poll = true;
}
if(is_reliable(c) && !timespec_isset(&c->rtrx_timeout)) {
if(data_acked) {
buffer_discard(&c->sndbuf, data_acked);
+ c->do_poll = true;
}
// Also advance snd.nxt if possible
}
if(c->poll) {
- if((c->state == ESTABLISHED || c->state == CLOSE_WAIT)) {
- uint32_t len = buffer_free(&c->sndbuf);
+ if((c->state == ESTABLISHED || c->state == CLOSE_WAIT) && c->do_poll) {
+ c->do_poll = false;
+ uint32_t len = buffer_free(&c->sndbuf);
if(len) {
c->poll(c, len);
if(c->sndbuf.maxsize != size) {
c->sndbuf.maxsize = -1;
}
+
+ c->do_poll = buffer_free(&c->sndbuf);
}
size_t utcp_get_rcvbuf(struct utcp_connection *c) {
// Register (or replace) the poll callback for this connection; no-op if c is NULL.
void utcp_set_poll_cb(struct utcp_connection *c, utcp_poll_t poll) {
if(c) {
c->poll = poll;
+// Prime do_poll from the current free space in the send buffer, so a freshly
+// registered callback can fire immediately instead of waiting for the next
+// buffer-space change to set the flag.
+	c->do_poll = buffer_free(&c->sndbuf);
}
}