X-Git-Url: http://git.meshlink.io/?a=blobdiff_plain;f=avahi-core%2Fquery-sched.c;h=ff833f97b95c2fe3243c5961fecbd79f148a8e8b;hb=147cdce70b22ae7cee9fb4fe123db40952f31c9e;hp=129b15e4c653ebd7d32b2be465a12c3db1d19da8;hpb=e63a65b3955b173a3e8d6b78c6377a518a9922d6;p=catta

diff --git a/avahi-core/query-sched.c b/avahi-core/query-sched.c
index 129b15e..ff833f9 100644
--- a/avahi-core/query-sched.c
+++ b/avahi-core/query-sched.c
@@ -1,18 +1,16 @@
-/* $Id$ */
-
 /***
   This file is part of avahi.
- 
+
   avahi is free software; you can redistribute it and/or modify it
   under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.
- 
+
   avahi is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General
   Public License for more details.
- 
+
   You should have received a copy of the GNU Lesser General Public
   License along with avahi; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
@@ -23,8 +21,13 @@
 #include <config.h>
 #endif
 
+#include <stdlib.h>
+
+#include <avahi-common/timeval.h>
+#include <avahi-common/malloc.h>
+
 #include "query-sched.h"
-#include "util.h"
+#include "log.h"
 
 #define AVAHI_QUERY_HISTORY_MSEC 100
 #define AVAHI_QUERY_DEFER_MSEC 100
@@ -33,14 +36,25 @@ typedef struct AvahiQueryJob AvahiQueryJob;
 typedef struct AvahiKnownAnswer AvahiKnownAnswer;
 
 struct AvahiQueryJob {
+    unsigned id;
+    int n_posted;
+
     AvahiQueryScheduler *scheduler;
     AvahiTimeEvent *time_event;
-    
-    gboolean done;
+
+    int done;
     struct timeval delivery;
     AvahiKey *key;
 
+    /* Jobs are stored in a simple linked list. It might turn out in
+     * the future that this list grows too long and we must switch to
+     * some other kind of data structure. This needs further
+     * investigation. I expect the list to be very short (< 20
+     * entries) most of the time, but this might be a wrong
+     * assumption, especially on setups where traffic reflection is
+     * involved. */
+
     AVAHI_LLIST_FIELDS(AvahiQueryJob, jobs);
 };
 
@@ -55,23 +69,31 @@ struct AvahiQueryScheduler {
     AvahiInterface *interface;
     AvahiTimeEventQueue *time_event_queue;
 
+    unsigned next_id;
+
     AVAHI_LLIST_HEAD(AvahiQueryJob, jobs);
     AVAHI_LLIST_HEAD(AvahiQueryJob, history);
     AVAHI_LLIST_HEAD(AvahiKnownAnswer, known_answers);
 };
 
-static AvahiQueryJob* job_new(AvahiQueryScheduler *s, AvahiKey *key, gboolean done) {
+static AvahiQueryJob* job_new(AvahiQueryScheduler *s, AvahiKey *key, int done) {
     AvahiQueryJob *qj;
-    
-    g_assert(s);
-    g_assert(key);
-    qj = g_new(AvahiQueryJob, 1);
+    assert(s);
+    assert(key);
+
+    if (!(qj = avahi_new(AvahiQueryJob, 1))) {
+        avahi_log_error(__FILE__": Out of memory");
+        return NULL;
+    }
+
     qj->scheduler = s;
     qj->key = avahi_key_ref(key);
     qj->time_event = NULL;
-    
-    if ((qj->done = done))
+    qj->n_posted = 1;
+    qj->id = s->next_id++;
+
+    if ((qj->done = done))
         AVAHI_LLIST_PREPEND(AvahiQueryJob, jobs, s->history, qj);
     else
         AVAHI_LLIST_PREPEND(AvahiQueryJob, jobs, s->jobs, qj);
@@ -80,11 +102,11 @@ static AvahiQueryJob* job_new(AvahiQueryScheduler *s, AvahiKey *key, gboolean do
 }
 
 static void job_free(AvahiQueryScheduler *s, AvahiQueryJob *qj) {
-    g_assert(s);
-    g_assert(qj);
+    assert(s);
+    assert(qj);
 
     if (qj->time_event)
-        avahi_time_event_queue_remove(s->time_event_queue, qj->time_event);
+        avahi_time_event_free(qj->time_event);
 
     if (qj->done)
         AVAHI_LLIST_REMOVE(AvahiQueryJob, jobs, s->history, qj);
@@ -92,35 +114,35 @@ static void job_free(AvahiQueryScheduler *s, AvahiQueryJob *qj) {
         AVAHI_LLIST_REMOVE(AvahiQueryJob, jobs, s->jobs, qj);
 
     avahi_key_unref(qj->key);
-    g_free(qj);
+    avahi_free(qj);
 }
 
-static void elapse_callback(AvahiTimeEvent *e, gpointer data);
+static void elapse_callback(AvahiTimeEvent *e, void* data);
 
-static void job_set_elapse_time(AvahiQueryScheduler *s, AvahiQueryJob *qj, guint msec, guint jitter) {
+static void job_set_elapse_time(AvahiQueryScheduler *s, AvahiQueryJob *qj, unsigned msec, unsigned jitter) {
     struct timeval tv;
 
-    g_assert(s);
-    g_assert(qj);
+    assert(s);
+    assert(qj);
 
     avahi_elapse_time(&tv, msec, jitter);
 
     if (qj->time_event)
-        avahi_time_event_queue_update(s->time_event_queue, qj->time_event, &tv);
+        avahi_time_event_update(qj->time_event, &tv);
     else
-        qj->time_event = avahi_time_event_queue_add(s->time_event_queue, &tv, elapse_callback, qj);
+        qj->time_event = avahi_time_event_new(s->time_event_queue, &tv, elapse_callback, qj);
 }
 
 static void job_mark_done(AvahiQueryScheduler *s, AvahiQueryJob *qj) {
-    g_assert(s);
-    g_assert(qj);
+    assert(s);
+    assert(qj);
 
-    g_assert(!qj->done);
+    assert(!qj->done);
 
     AVAHI_LLIST_REMOVE(AvahiQueryJob, jobs, s->jobs, qj);
     AVAHI_LLIST_PREPEND(AvahiQueryJob, jobs, s->history, qj);
 
-    qj->done = TRUE;
+    qj->done = 1;
 
     job_set_elapse_time(s, qj, AVAHI_QUERY_HISTORY_MSEC, 0);
     gettimeofday(&qj->delivery, NULL);
@@ -128,12 +150,17 @@ static void job_mark_done(AvahiQueryScheduler *s, AvahiQueryJob *qj) {
 AvahiQueryScheduler *avahi_query_scheduler_new(AvahiInterface *i) {
     AvahiQueryScheduler *s;
 
-    g_assert(i);
+    assert(i);
+
+    if (!(s = avahi_new(AvahiQueryScheduler, 1))) {
+        avahi_log_error(__FILE__": Out of memory");
+        return NULL; /* OOM */
+    }
 
-    s = g_new(AvahiQueryScheduler, 1);
     s->interface = i;
     s->time_event_queue = i->monitor->server->time_event_queue;
-    
+    s->next_id = 0;
+
     AVAHI_LLIST_HEAD_INIT(AvahiQueryJob, s->jobs);
     AVAHI_LLIST_HEAD_INIT(AvahiQueryJob, s->history);
     AVAHI_LLIST_HEAD_INIT(AvahiKnownAnswer, s->known_answers);
@@ -142,35 +169,39 @@ AvahiQueryScheduler *avahi_query_scheduler_new(AvahiInterface *i) {
 }
 
 void avahi_query_scheduler_free(AvahiQueryScheduler *s) {
-    g_assert(s);
+    assert(s);
 
-    g_assert(!s->known_answers);
+    assert(!s->known_answers);
     avahi_query_scheduler_clear(s);
-    g_free(s);
+    avahi_free(s);
 }
 
 void avahi_query_scheduler_clear(AvahiQueryScheduler *s) {
-    g_assert(s);
-    
+    assert(s);
+
     while (s->jobs)
         job_free(s, s->jobs);
     while (s->history)
         job_free(s, s->history);
 }
 
-static gpointer known_answer_walk_callback(AvahiCache *c, AvahiKey *pattern, AvahiCacheEntry *e, gpointer userdata) {
+static void* known_answer_walk_callback(AvahiCache *c, AvahiKey *pattern, AvahiCacheEntry *e, void* userdata) {
     AvahiQueryScheduler *s = userdata;
     AvahiKnownAnswer *ka;
-    
-    g_assert(c);
-    g_assert(pattern);
-    g_assert(e);
-    g_assert(s);
+
+    assert(c);
+    assert(pattern);
+    assert(e);
+    assert(s);
 
     if (avahi_cache_entry_half_ttl(c, e))
         return NULL;
-    
-    ka = g_new0(AvahiKnownAnswer, 1);
+
+    if (!(ka = avahi_new0(AvahiKnownAnswer, 1))) {
+        avahi_log_error(__FILE__": Out of memory");
+        return NULL;
+    }
+
     ka->scheduler = s;
     ka->record = avahi_record_ref(e->record);
 
@@ -178,34 +209,34 @@ static gpointer known_answer_walk_callback(AvahiCache *c, AvahiKey *pattern, Ava
     return NULL;
 }
 
-static gboolean packet_add_query_job(AvahiQueryScheduler *s, AvahiDnsPacket *p, AvahiQueryJob *qj) {
-    g_assert(s);
-    g_assert(p);
-    g_assert(qj);
+static int packet_add_query_job(AvahiQueryScheduler *s, AvahiDnsPacket *p, AvahiQueryJob *qj) {
+    assert(s);
+    assert(p);
+    assert(qj);
 
-    if (!avahi_dns_packet_append_key(p, qj->key, FALSE))
-        return FALSE;
+    if (!avahi_dns_packet_append_key(p, qj->key, 0))
+        return 0;
 
     /* Add all matching known answers to the list */
     avahi_cache_walk(s->interface->cache, qj->key, known_answer_walk_callback, s);
-    
+
     job_mark_done(s, qj);
 
-    return TRUE;
+    return 1;
 }
 
 static void append_known_answers_and_send(AvahiQueryScheduler *s, AvahiDnsPacket *p) {
     AvahiKnownAnswer *ka;
-    guint n;
-    g_assert(s);
-    g_assert(p);
+    unsigned n;
+    assert(s);
+    assert(p);
 
     n = 0;
-    
+
     while ((ka = s->known_answers)) {
-        gboolean too_large = FALSE;
+        int too_large = 0;
 
-        while (!avahi_dns_packet_append_record(p, ka->record, FALSE, 0)) {
+        while (!avahi_dns_packet_append_record(p, ka->record, 0, 0)) {
 
             if (avahi_dns_packet_is_empty(p)) {
                 /* The record is too large to fit into one packet, so
@@ -213,7 +244,7 @@ static void append_known_answers_and_send(AvahiQueryScheduler *s, AvahiDnsPacket
                    the owner of the record send it as a response. This
                    has the advantage of a cache refresh. */
 
-                too_large = TRUE;
+                too_large = 1;
                 break;
             }
 
@@ -228,25 +259,25 @@ static void append_known_answers_and_send(AvahiQueryScheduler *s, AvahiDnsPacket
         AVAHI_LLIST_REMOVE(AvahiKnownAnswer, known_answer, s->known_answers, ka);
         avahi_record_unref(ka->record);
-        g_free(ka);
+        avahi_free(ka);
 
         if (!too_large)
             n++;
     }
-    
+
     avahi_dns_packet_set_field(p, AVAHI_DNS_FIELD_ANCOUNT, n);
     avahi_interface_send_packet(s->interface, p);
     avahi_dns_packet_free(p);
 }
 
-static void elapse_callback(AvahiTimeEvent *e, gpointer data) {
+static void elapse_callback(AVAHI_GCC_UNUSED AvahiTimeEvent *e, void* data) {
     AvahiQueryJob *qj = data;
     AvahiQueryScheduler *s;
     AvahiDnsPacket *p;
-    guint n;
-    gboolean b;
+    unsigned n;
+    int b;
 
-    g_assert(qj);
+    assert(qj);
     s = qj->scheduler;
 
     if (qj->done) {
@@ -255,11 +286,13 @@ static void elapse_callback(AvahiTimeEvent *e, gpointer data) {
         return;
     }
 
-    g_assert(!s->known_answers);
-    
-    p = avahi_dns_packet_new_query(s->interface->hardware->mtu);
+    assert(!s->known_answers);
+
+    if (!(p = avahi_dns_packet_new_query(s->interface->hardware->mtu)))
+        return; /* OOM */
+
     b = packet_add_query_job(s, p, qj);
-    g_assert(b); /* An query must always fit in */
+    assert(b); /* An query must always fit in */
     n = 1;
 
     /* Try to fill up packet with more queries, if available */
@@ -280,12 +313,12 @@ static void elapse_callback(AvahiTimeEvent *e, gpointer data) {
 static AvahiQueryJob* find_scheduled_job(AvahiQueryScheduler *s, AvahiKey *key) {
     AvahiQueryJob *qj;
 
-    g_assert(s);
-    g_assert(key);
+    assert(s);
+    assert(key);
 
     for (qj = s->jobs; qj; qj = qj->jobs_next) {
-        g_assert(!qj->done);
-        
+        assert(!qj->done);
+
         if (avahi_key_equal(qj->key, key))
             return qj;
     }
@@ -295,12 +328,12 @@ static AvahiQueryJob* find_scheduled_job(AvahiQueryScheduler *s, AvahiKey *key)
 static AvahiQueryJob* find_history_job(AvahiQueryScheduler *s, AvahiKey *key) {
     AvahiQueryJob *qj;
-    
-    g_assert(s);
-    g_assert(key);
+
+    assert(s);
+    assert(key);
 
     for (qj = s->history; qj; qj = qj->jobs_next) {
-        g_assert(qj->done);
+        assert(qj->done);
 
         if (avahi_key_equal(qj->key, key)) {
             /* Check whether this entry is outdated */
@@ -310,7 +343,7 @@ static AvahiQueryJob* find_history_job(AvahiQueryScheduler *s, AvahiKey *key) {
                 job_free(s, qj);
                 return NULL;
             }
-            
+
             return qj;
         }
     }
@@ -318,62 +351,100 @@ static AvahiQueryJob* find_history_job(AvahiQueryScheduler *s, AvahiKey *key) {
     return NULL;
 }
 
-gboolean avahi_query_scheduler_post(AvahiQueryScheduler *s, AvahiKey *key, gboolean immediately) {
+int avahi_query_scheduler_post(AvahiQueryScheduler *s, AvahiKey *key, int immediately, unsigned *ret_id) {
     struct timeval tv;
     AvahiQueryJob *qj;
-    
-    g_assert(s);
-    g_assert(key);
-    if ((qj = find_history_job(s, key))) {
-/*         avahi_log_debug("Query suppressed by local duplicate suppression (history)"); */
-        return FALSE;
-    }
-    
+    assert(s);
+    assert(key);
+
+    if ((qj = find_history_job(s, key)))
+        return 0;
+
     avahi_elapse_time(&tv, immediately ? 0 : AVAHI_QUERY_DEFER_MSEC, 0);
 
     if ((qj = find_scheduled_job(s, key))) {
         /* Duplicate questions suppression */
 
-/*         avahi_log_debug("Query suppressed by local duplicate suppression (scheduled)"); */
-        
         if (avahi_timeval_compare(&tv, &qj->delivery) < 0) {
             /* If the new entry should be scheduled earlier,
              * update the old entry */
             qj->delivery = tv;
-            avahi_time_event_queue_update(s->time_event_queue, qj->time_event, &qj->delivery);
+            avahi_time_event_update(qj->time_event, &qj->delivery);
        }
 
-        return TRUE;
+        qj->n_posted++;
+
     } else {
-/*         avahi_log_debug("Accepted new query job.\n"); */
-        qj = job_new(s, key, FALSE);
+        if (!(qj = job_new(s, key, 0)))
+            return 0; /* OOM */
+
         qj->delivery = tv;
-        qj->time_event = avahi_time_event_queue_add(s->time_event_queue, &qj->delivery, elapse_callback, qj);
-
-        return TRUE;
+        qj->time_event = avahi_time_event_new(s->time_event_queue, &qj->delivery, elapse_callback, qj);
     }
+
+    if (ret_id)
+        *ret_id = qj->id;
+
+    return 1;
 }
 
 void avahi_query_scheduler_incoming(AvahiQueryScheduler *s, AvahiKey *key) {
     AvahiQueryJob *qj;
-    
-    g_assert(s);
-    g_assert(key);
+
+    assert(s);
+    assert(key);
 
     /* This function is called whenever an incoming query was
-     * receieved. We drop scheduled queries that match. The keyword is
+     * received. We drop scheduled queries that match. The keyword is
      * "DUPLICATE QUESTION SUPPRESION". */
 
     if ((qj = find_scheduled_job(s, key))) {
-/*         avahi_log_debug("Query suppressed by distributed duplicate suppression"); */
         job_mark_done(s, qj);
         return;
    }
-    
-    qj = job_new(s, key, TRUE);
+
+    /* Look if there's a history job for this key. If there is, just
+     * update the elapse time */
+    if (!(qj = find_history_job(s, key)))
+        if (!(qj = job_new(s, key, 1)))
+            return; /* OOM */
+
     gettimeofday(&qj->delivery, NULL);
     job_set_elapse_time(s, qj, AVAHI_QUERY_HISTORY_MSEC, 0);
 }
 
+int avahi_query_scheduler_withdraw_by_id(AvahiQueryScheduler *s, unsigned id) {
+    AvahiQueryJob *qj;
+
+    assert(s);
+
+    /* Very short lived queries can withdraw an already scheduled item
+     * from the queue using this function, simply by passing the id
+     * returned by avahi_query_scheduler_post(). */
+
+    for (qj = s->jobs; qj; qj = qj->jobs_next) {
+        assert(!qj->done);
+
+        if (qj->id == id) {
+            /* Entry found */
+
+            assert(qj->n_posted >= 1);
+
+            if (--qj->n_posted <= 0) {
+
+                /* We withdraw this job only if the calling object was
+                 * the only remaining poster. (Usually this is the
+                 * case since there should exist only one querier per
+                 * key, but there are exceptions, notably reflected
+                 * traffic.) */
+
                job_free(s, qj);
                return 1;
            }
        }
    }

    return 0;
}
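
A usage sketch (illustration only, not code from this commit): avahi_query_scheduler_post() now fills an unsigned *ret_id out-parameter, and avahi_query_scheduler_withdraw_by_id() lets a short-lived caller inside avahi-core cancel a query that was posted but not yet sent. The fragment assumes an already created AvahiQueryScheduler and that avahi_key_new() plus the DNS class/type constants are reachable through the usual avahi-core headers ("rr.h"); those names are assumptions, not shown in the diff above.

    /* Sketch only: lives inside avahi-core next to the scheduler, with an
     * AvahiQueryScheduler already set up for some interface. */
    #include "rr.h"          /* AvahiKey, avahi_key_new(); constants assumed available here */
    #include "query-sched.h"

    static void post_and_maybe_withdraw(AvahiQueryScheduler *s) {
        AvahiKey *k;
        unsigned id;

        if (!(k = avahi_key_new("example.local", AVAHI_DNS_CLASS_IN, AVAHI_DNS_TYPE_A)))
            return; /* OOM */

        /* immediately == 0: the job is deferred by AVAHI_QUERY_DEFER_MSEC and
         * merged with any scheduled job for the same key (its n_posted grows). */
        if (avahi_query_scheduler_post(s, k, 0, &id)) {

            /* ... the posting object goes away before the query is sent ... */

            /* Returns 1 and frees the job only if this caller was the last
             * remaining poster; otherwise the job stays queued. */
            if (avahi_query_scheduler_withdraw_by_id(s, id)) {
                /* the query job was dropped before transmission */
            }
        }

        avahi_key_unref(k);
    }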
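
Beyond the type swaps (gboolean/guint/gpointer to int/unsigned/void*, g_new/g_free to avahi_new/avahi_free with explicit out-of-memory handling), the diff also tracks a renamed time-event API: avahi_time_event_queue_add/update/remove become avahi_time_event_new/avahi_time_event_update/avahi_time_event_free. A minimal sketch of the new calls follows; it assumes the queue type is declared in avahi-core's "timeeventq.h" and avahi_elapse_time() in <avahi-common/timeval.h>, neither of which is confirmed by the diff itself.

    #include <avahi-common/timeval.h>   /* avahi_elapse_time(); assumed location */

    #include "timeeventq.h"             /* AvahiTimeEventQueue, AvahiTimeEvent; assumed header name */

    static void my_elapse(AvahiTimeEvent *e, void *userdata) {
        /* runs once when the timeval passed at creation has elapsed */
    }

    static AvahiTimeEvent* schedule_deferred(AvahiTimeEventQueue *q, void *userdata) {
        struct timeval tv;

        /* "now + 100 ms, no jitter", the same pattern as AVAHI_QUERY_DEFER_MSEC above */
        avahi_elapse_time(&tv, 100, 0);

        /* formerly avahi_time_event_queue_add(); the returned event is
         * rescheduled with avahi_time_event_update() and released with
         * avahi_time_event_free(). */
        return avahi_time_event_new(q, &tv, my_elapse, userdata);
    }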