2 * libunbound/worker.c - worker thread or process that resolves
4 * Copyright (c) 2007, NLnet Labs. All rights reserved.
6 * This software is open source.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
12 * Redistributions of source code must retain the above copyright notice,
13 * this list of conditions and the following disclaimer.
15 * Redistributions in binary form must reproduce the above copyright notice,
16 * this list of conditions and the following disclaimer in the documentation
17 * and/or other materials provided with the distribution.
19 * Neither the name of the NLNET LABS nor the names of its contributors may
20 * be used to endorse or promote products derived from this software without
21 * specific prior written permission.
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
26 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
27 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
28 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
29 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
30 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
31 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
32 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
33 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 * This file contains the worker process or thread that performs
40 * the DNS resolving and validation. The worker is called by a procedure
41 * and if in the background continues until exit, if in the foreground
42 * returns from the procedure when done.
46 #include <openssl/ssl.h>
48 #include "libunbound/libworker.h"
49 #include "libunbound/context.h"
50 #include "libunbound/unbound.h"
51 #include "libunbound/worker.h"
52 #include "libunbound/unbound-event.h"
53 #include "services/outside_network.h"
54 #include "services/mesh.h"
55 #include "services/localzone.h"
56 #include "services/cache/rrset.h"
57 #include "services/outbound_list.h"
58 #include "services/authzone.h"
59 #include "util/fptr_wlist.h"
60 #include "util/module.h"
61 #include "util/regional.h"
62 #include "util/random.h"
63 #include "util/config_file.h"
64 #include "util/netevent.h"
65 #include "util/storage/lookup3.h"
66 #include "util/storage/slabhash.h"
67 #include "util/net_help.h"
68 #include "util/data/dname.h"
69 #include "util/data/msgreply.h"
70 #include "util/data/msgencode.h"
71 #include "util/tube.h"
72 #include "iterator/iter_fwd.h"
73 #include "iterator/iter_hints.h"
74 #include "sldns/sbuffer.h"
75 #include "sldns/str2wire.h"
77 #ifdef HAVE_TARGETCONDITIONALS_H
78 #include <TargetConditionals.h>
81 #if (defined(TARGET_OS_TV) && TARGET_OS_TV) || (defined(TARGET_OS_WATCH) && TARGET_OS_WATCH)
85 /** handle new query command for bg worker */
86 static void handle_newq(struct libworker* w, uint8_t* buf, uint32_t len);
88 /** delete libworker env */
/* Frees everything hanging off w->env plus the worker's outside network
 * and SSL context. NOTE(review): this listing has elided lines (the
 * embedded line numbers skip, e.g. 101->105) — the null checks and
 * braces around these calls are not visible here; confirm against the
 * full source before relying on the exact control flow. */
90 libworker_delete_env(struct libworker* w)
/* stop pending outside-network activity before tearing down state it
 * may call back into */
93 outside_network_quit_prepare(w->back);
94 mesh_delete(w->env->mesh);
/* hand the allocation cache back to the shared context; the flag
 * matches how the alloc was obtained in libworker_setup */
95 context_release_alloc(w->ctx, w->env->alloc,
96 !w->is_bg || w->is_bg_thread);
97 sldns_buffer_free(w->env->scratch_buffer);
98 regional_destroy(w->env->scratch);
99 forwards_delete(w->env->fwds);
100 hints_delete(w->env->hints);
101 ub_randfree(w->env->rnd);
105 SSL_CTX_free(w->sslctx);
107 outside_network_delete(w->back);
110 /** delete libworker struct */
/* Full teardown for a worker that owns its comm base: free the env,
 * then the base itself. */
112 libworker_delete(struct libworker* w)
115 libworker_delete_env(w);
116 comm_base_delete(w->base);
/* Teardown variant for event-API workers: the underlying event base is
 * owned by the application, so only the comm_base wrapper is deleted
 * (comm_base_delete_no_base), not the event base itself. */
121 libworker_delete_event(struct libworker* w)
124 libworker_delete_env(w);
125 comm_base_delete_no_base(w->base);
129 /** setup fresh libworker struct */
/* Builds a complete worker: module env, alloc cache, scratch regions,
 * forwards/hints from config, optional TLS context, RNG, comm base and
 * outside network, then the mesh. is_bg selects background mode; eb, if
 * non-NULL, supplies an application event base. The ctx->cfglock is
 * taken around cfg reads only when sharing with other threads
 * (!is_bg || is_bg_thread); a forked bg process has its own copy.
 * NOTE(review): many lines (error returns, braces) are elided from this
 * listing — the cleanup-on-failure paths are not fully visible. */
130 static struct libworker*
131 libworker_setup(struct ub_ctx* ctx, int is_bg, struct ub_event_base* eb)
133 struct libworker* w = (struct libworker*)calloc(1, sizeof(*w));
134 struct config_file* cfg = ctx->env->cfg;
140 w->env = (struct module_env*)malloc(sizeof(*w->env));
/* alloc cache comes from the context pool when memory is shared */
146 w->env->alloc = context_obtain_alloc(ctx, !w->is_bg || w->is_bg_thread);
151 w->thread_num = w->env->alloc->thread_num;
152 alloc_set_id_cleanup(w->env->alloc, &libworker_alloc_cleanup, w);
/* lock cfg while reading it, unless we are a separate (forked) process */
153 if(!w->is_bg || w->is_bg_thread) {
154 lock_basic_lock(&ctx->cfglock);
155 w->env->scratch = regional_create_custom(cfg->msg_buffer_size);
156 w->env->scratch = regional_create_custom(cfg->msg_buffer_size);
/* Public constructor for the event-based API: a foreground (is_bg=0)
 * worker driven by the caller-supplied ub_event_base. */
266 struct libworker* libworker_create_event(struct ub_ctx* ctx,
267 struct ub_event_base* eb)
269 return libworker_setup(ctx, 0, eb);
272 /** handle cancel command for bg worker */
/* Deserializes a cancel message into the matching ctx_query. In thread
 * mode the ctx->queries tree is shared, so the lookup is done under
 * cfglock; in forked mode no lock is needed (own copy). A failed lookup
 * is benign (answer already sent before the cancel arrived). */
274 handle_cancel(struct libworker* w, uint8_t* buf, uint32_t len)
277 if(w->is_bg_thread) {
278 lock_basic_lock(&w->ctx->cfglock);
279 q = context_deserialize_cancel(w->ctx, buf, len);
280 lock_basic_unlock(&w->ctx->cfglock);
282 q = context_deserialize_cancel(w->ctx, buf, len);
285 /* probably simply lookup failed, i.e. the message had been
286 * processed and answered before the cancel arrived */
293 /** do control command coming into bg server */
/* Dispatch on the serialized command type: NEWQUERY starts resolution,
 * CANCEL aborts one, ANSWER (and anything unknown) is an error and
 * falls through to exiting the comm base (quit). */
295 libworker_do_cmd(struct libworker* w, uint8_t* msg, uint32_t len)
297 switch(context_serial_getcmd(msg, len)) {
299 case UB_LIBCMD_ANSWER:
300 log_err("unknown command for bg worker %d",
301 (int)context_serial_getcmd(msg, len));
302 /* and fall through to quit */
306 comm_base_exit(w->base);
308 case UB_LIBCMD_NEWQUERY:
309 handle_newq(w, msg, len);
311 case UB_LIBCMD_CANCEL:
312 handle_cancel(w, msg, len);
317 /** handle control command coming into server */
/* Tube-read callback on the command pipe: on pipe error, exit the event
 * loop; otherwise hand the message to libworker_do_cmd, which takes
 * ownership of (and frees) the buffer. */
319 libworker_handle_control_cmd(struct tube* ATTR_UNUSED(tube),
320 uint8_t* msg, size_t len, int err, void* arg)
322 struct libworker* w = (struct libworker*)arg;
326 /* it is of no use to go on, exit */
327 comm_base_exit(w->base);
330 libworker_do_cmd(w, msg, len); /* also frees the buf */
333 /** the background thread func */
/* Entry point for the background worker (thread or forked process):
 * wire the command pipe (qq) for reading and the result pipe (rr) for
 * writing onto the comm base, run the event loop until told to quit,
 * then send a final message back over rr_pipe before cleanup. In the
 * THREADS_DISABLED (forked) build, unused pipe ends are closed first. */
335 libworker_dobg(void* arg)
339 struct libworker* w = (struct libworker*)arg;
342 log_err("libunbound bg worker init failed, nomem");
346 log_thread_set(&w->thread_num);
347 #ifdef THREADS_DISABLED
350 /* close non-used parts of the pipes */
351 tube_close_write(ctx->qq_pipe);
352 tube_close_read(ctx->rr_pipe);
354 if(!tube_setup_bg_listen(ctx->qq_pipe, w->base,
355 libworker_handle_control_cmd, w)) {
356 log_err("libunbound bg worker init failed, no bglisten");
359 if(!tube_setup_bg_write(ctx->rr_pipe, w->base)) {
360 log_err("libunbound bg worker init failed, no bgwrite");
/* run the event loop; returns when comm_base_exit is called */
365 comm_base_dispatch(w->base);
370 tube_remove_bg_listen(w->ctx->qq_pipe);
371 tube_remove_bg_write(w->ctx->rr_pipe);
/* notify the foreground that the bg worker is quitting */
373 (void)tube_write_msg(ctx->rr_pipe, (uint8_t*)&m,
374 (uint32_t)sizeof(m), 0);
375 #ifdef THREADS_DISABLED
376 /* close pipes from forked process before exit */
377 tube_close_read(ctx->qq_pipe);
378 tube_close_write(ctx->rr_pipe);
/* Start the background worker. With threads: create the worker under
 * cfglock and spawn a thread running libworker_dobg. Without threads
 * (HAVE_FORK): fork; the child builds its own worker, closes the pipe
 * ends it does not use, and runs libworker_dobg; the parent closes the
 * complementary ends so the child sees 'pipe closed' when the main
 * process exits. Returns UB_NOMEM on allocation failure.
 * NOTE(review): elided lines hide the fork error case and returns. */
383 int libworker_bg(struct ub_ctx* ctx)
386 /* fork or threadcreate */
387 lock_basic_lock(&ctx->cfglock);
389 lock_basic_unlock(&ctx->cfglock);
390 w = libworker_setup(ctx, 1, NULL);
391 if(!w) return UB_NOMEM;
393 #ifdef ENABLE_LOCK_CHECKS
394 w->thread_num = 1; /* for nicer DEBUG checklocks */
396 ub_thread_create(&ctx->bg_tid, libworker_dobg, w);
398 lock_basic_unlock(&ctx->cfglock);
400 /* no fork on windows */
402 #else /* HAVE_FORK */
403 switch((ctx->bg_pid=fork())) {
405 w = libworker_setup(ctx, 1, NULL);
406 if(!w) fatal_exit("out of memory");
407 /* close non-used parts of the pipes */
408 tube_close_write(ctx->qq_pipe);
409 tube_close_read(ctx->rr_pipe);
410 (void)libworker_dobg(w);
416 /* close non-used parts, so that the worker
417 * bgprocess gets 'pipe closed' when the
418 * main process exits */
419 tube_close_read(ctx->qq_pipe);
420 tube_close_write(ctx->rr_pipe);
423 #endif /* HAVE_FORK */
428 /** insert canonname */
/* Stores a heap copy of the (textual form of) dname s as
 * res->canonname; returns nonzero on success, 0 when strdup fails.
 * NOTE(review): the conversion of s into buf is elided from this
 * listing — confirm in the full source. */
430 fill_canon(struct ub_result* res, uint8_t* s)
434 res->canonname = strdup(buf);
435 return res->canonname != 0;
438 /** fill data into result */
/* Populates res (canonname, data[], len[], ttl) from the answer rrset
 * of a parsed reply. With no answer rrset, allocates empty
 * NULL-terminated data/len arrays and takes the negative ttl from the
 * reply. Returns 0 on out-of-memory, nonzero on success. rdlength
 * prefixes (2 bytes) are stripped from each stored rdata. */
440 fill_res(struct ub_result* res, struct ub_packed_rrset_key* answer,
441 uint8_t* finalcname, struct query_info* rq, struct reply_info* rep)
444 struct packed_rrset_data* data;
448 if(!fill_canon(res, finalcname))
449 return 0; /* out of memory */
451 if(rep->rrset_count != 0)
452 res->ttl = (int)rep->ttl;
/* empty, NULL-terminated arrays for the no-data case */
453 res->data = (char**)calloc(1, sizeof(char*));
454 res->len = (int*)calloc(1, sizeof(int));
455 return (res->data && res->len);
457 data = (struct packed_rrset_data*)answer->entry.data;
/* set canonname only if the answer owner differs from the query name */
458 if(query_dname_compare(rq->qname, answer->rk.dname) != 0) {
459 if(!fill_canon(res, answer->rk.dname))
460 return 0; /* out of memory */
461 } else res->canonname = NULL;
462 res->data = (char**)calloc(data->count+1, sizeof(char*));
463 res->len = (int*)calloc(data->count+1, sizeof(int));
464 if(!res->data || !res->len)
465 return 0; /* out of memory */
466 for(i=0; i<data->count; i++) {
467 /* remove rdlength from rdata */
468 res->len[i] = (int)(data->rr_len[i] - 2);
469 res->data[i] = memdup(data->rr_data[i]+2, (size_t)res->len[i]);
471 return 0; /* out of memory */
473 /* ttl for positive answers, from CNAME and answer RRs */
474 if(data->count != 0) {
/* take the minimum ttl over the answer-section rrsets */
476 res->ttl = (int)data->ttl;
477 for(j=0; j<rep->an_numrrsets; j++) {
478 struct packed_rrset_data* d =
479 (struct packed_rrset_data*)rep->rrsets[j]->
481 if((int)d->ttl < res->ttl)
482 res->ttl = (int)d->ttl;
485 /* ttl for negative answers */
486 if(data->count == 0 && rep->rrset_count != 0)
487 res->ttl = (int)rep->ttl;
/* NULL/0 terminators so callers can iterate without count */
488 res->data[data->count] = NULL;
489 res->len[data->count] = 0;
493 /** fill result from parsed message, on error fills servfail */
/* Parses the wire reply in buf (into temp region), fills res via
 * fill_res, then sets rcode/havedata/nxdomain/secure/bogus from the
 * reply flags and the validator's msg_security. res->rcode starts as
 * SERVFAIL so any early return leaves a servfail result. */
495 libworker_enter_result(struct ub_result* res, sldns_buffer* buf,
496 struct regional* temp, enum sec_status msg_security)
498 struct query_info rq;
499 struct reply_info* rep;
500 res->rcode = LDNS_RCODE_SERVFAIL;
501 rep = parse_reply_in_temp_region(buf, temp, &rq);
503 log_err("cannot parse buf");
504 return; /* error parsing buf, or out of memory */
506 if(!fill_res(res, reply_find_answer_rrset(&rq, rep),
507 reply_find_final_cname_target(&rq, rep), &rq, rep))
508 return; /* out of memory */
509 /* rcode, havedata, nxdomain, secure, bogus */
510 res->rcode = (int)FLAGS_GET_RCODE(rep->flags);
511 if(res->data && res->data[0])
513 if(res->rcode == LDNS_RCODE_NXDOMAIN)
515 if(msg_security == sec_status_secure)
/* sentinel-fail is reported to the user as bogus as well */
517 if(msg_security == sec_status_bogus ||
518 msg_security == sec_status_secure_sentinel_fail)
522 /** fillup fg results */
/* Copies a finished foreground query's outcome into q: the rcode,
 * why_bogus string (duplicated), ratelimit flag, a private copy of the
 * wire message, and finally the parsed result via
 * libworker_enter_result using the worker's scratch region. On error
 * rcodes the result is the rcode itself and parsing is skipped. */
524 libworker_fillup_fg(struct ctx_query* q, int rcode, sldns_buffer* buf,
525 enum sec_status s, char* why_bogus, int was_ratelimited)
527 q->res->was_ratelimited = was_ratelimited;
529 q->res->why_bogus = strdup(why_bogus);
531 q->res->rcode = rcode;
536 q->res->rcode = LDNS_RCODE_SERVFAIL;
537 q->msg_security = sec_status_unchecked;
/* keep a heap copy of the raw wire answer for the API user */
538 q->msg = memdup(sldns_buffer_begin(buf), sldns_buffer_limit(buf));
539 q->msg_len = sldns_buffer_limit(buf);
541 return; /* the error is in the rcode */
544 /* canonname and results */
546 libworker_enter_result(q->res, buf, q->w->env->scratch, s);
/* Mesh completion callback for a foreground query: stop the event loop
 * (the fg caller is blocked in comm_base_dispatch) and record the
 * result into the ctx_query. */
550 libworker_fg_done_cb(void* arg, int rcode, sldns_buffer* buf, enum sec_status s,
551 char* why_bogus, int was_ratelimited)
553 struct ctx_query* q = (struct ctx_query*)arg;
554 /* fg query is done; exit comm base */
555 comm_base_exit(q->w->base);
557 libworker_fillup_fg(q, rcode, buf, s, why_bogus, was_ratelimited);
560 /** setup qinfo and edns */
/* Translates the user's query (name string, type, class) into a wire
 * query_info and fills a default EDNS record (DO bit set, version 0,
 * udp_size capped at the udp buffer capacity or 65535). The qname is
 * heap-allocated by sldns_str2wire_dname; ownership rules for freeing
 * it are not visible in this elided listing — confirm in full source. */
562 setup_qinfo_edns(struct libworker* w, struct ctx_query* q,
563 struct query_info* qinfo, struct edns_data* edns)
565 qinfo->qtype = (uint16_t)q->res->qtype;
566 qinfo->qclass = (uint16_t)q->res->qclass;
567 qinfo->local_alias = NULL;
568 qinfo->qname = sldns_str2wire_dname(q->res->qname, &qinfo->qname_len);
572 edns->edns_present = 1;
574 edns->edns_version = 0;
/* request DNSSEC records (DO bit) */
575 edns->bits = EDNS_DO;
576 edns->opt_list = NULL;
577 if(sldns_buffer_capacity(w->back->udp_buff) < 65535)
578 edns->udp_size = (uint16_t)sldns_buffer_capacity(
580 else edns->udp_size = 65535;
/* Resolve one query in the foreground: build a throwaway worker, try
 * local zones and auth zones for a fixed answer first, otherwise submit
 * to the mesh and block in comm_base_dispatch until the done callback
 * fires. NOTE(review): worker cleanup and return statements are elided
 * from this listing. */
584 int libworker_fg(struct ub_ctx* ctx, struct ctx_query* q)
586 struct libworker* w = libworker_setup(ctx, 0, NULL);
587 uint16_t qflags, qid;
588 struct query_info qinfo;
589 struct edns_data edns;
592 if(!setup_qinfo_edns(w, q, &qinfo, &edns)) {
599 /* see if there is a fixed answer */
/* pre-write id and flags so a local answer is a complete packet */
600 sldns_buffer_write_u16_at(w->back->udp_buff, 0, qid);
601 sldns_buffer_write_u16_at(w->back->udp_buff, 2, qflags);
602 if(local_zones_answer(ctx->local_zones, w->env, &qinfo, &edns,
603 w->back->udp_buff, w->env->scratch, NULL, NULL, 0, NULL, 0,
604 NULL, 0, NULL, 0, NULL)) {
605 regional_free_all(w->env->scratch);
606 libworker_fillup_fg(q, LDNS_RCODE_NOERROR,
607 w->back->udp_buff, sec_status_insecure, NULL, 0);
612 if(ctx->env->auth_zones && auth_zones_answer(ctx->env->auth_zones,
613 w->env, &qinfo, &edns, NULL, w->back->udp_buff, w->env->scratch)) {
614 regional_free_all(w->env->scratch);
615 libworker_fillup_fg(q, LDNS_RCODE_NOERROR,
616 w->back->udp_buff, sec_status_insecure, NULL, 0);
621 /* process new query */
622 if(!mesh_new_callback(w->env->mesh, &qinfo, qflags, &edns,
623 w->back->udp_buff, qid, libworker_fg_done_cb, q)) {
/* block until libworker_fg_done_cb calls comm_base_exit */
630 comm_base_dispatch(w->base);
/* Mesh completion callback for the event API: snapshot the user
 * callback and its arg, remove the query from ctx->queries under
 * cfglock and delete it, then (unless cancelled — handling partly
 * elided here) invoke the user callback with the raw wire answer and a
 * security code derived from sec_status (bogus/secure). */
637 libworker_event_done_cb(void* arg, int rcode, sldns_buffer* buf,
638 enum sec_status s, char* why_bogus, int was_ratelimited)
640 struct ctx_query* q = (struct ctx_query*)arg;
/* copy out before context_query_delete frees q */
641 ub_event_callback_type cb = q->cb_event;
642 void* cb_arg = q->cb_arg;
643 int cancelled = q->cancelled;
646 struct ub_ctx* ctx = q->w->ctx;
647 lock_basic_lock(&ctx->cfglock);
648 (void)rbtree_delete(&ctx->queries, q->node.key);
650 context_query_delete(q);
651 lock_basic_unlock(&ctx->cfglock);
656 if(s == sec_status_bogus)
658 else if(s == sec_status_secure)
660 (*cb)(cb_arg, rcode, (buf?(void*)sldns_buffer_begin(buf):NULL),
661 (buf?(int)sldns_buffer_limit(buf):0), sec, why_bogus, was_ratelimited);
/* Event-API query entry: like libworker_fg but non-blocking, using the
 * shared ctx->event_worker. Tries local zones / auth zones for an
 * immediate answer (delivered via libworker_event_done_cb), otherwise
 * attaches the query to the mesh and returns async_id = querynum.
 * NOTE(review): return statements are elided from this listing. */
665 int libworker_attach_mesh(struct ub_ctx* ctx, struct ctx_query* q,
668 struct libworker* w = ctx->event_worker;
669 uint16_t qflags, qid;
670 struct query_info qinfo;
671 struct edns_data edns;
674 if(!setup_qinfo_edns(w, q, &qinfo, &edns))
679 /* see if there is a fixed answer */
680 sldns_buffer_write_u16_at(w->back->udp_buff, 0, qid);
681 sldns_buffer_write_u16_at(w->back->udp_buff, 2, qflags);
682 if(local_zones_answer(ctx->local_zones, w->env, &qinfo, &edns,
683 w->back->udp_buff, w->env->scratch, NULL, NULL, 0, NULL, 0,
684 NULL, 0, NULL, 0, NULL)) {
685 regional_free_all(w->env->scratch);
687 libworker_event_done_cb(q, LDNS_RCODE_NOERROR,
688 w->back->udp_buff, sec_status_insecure, NULL, 0);
691 if(ctx->env->auth_zones && auth_zones_answer(ctx->env->auth_zones,
692 w->env, &qinfo, &edns, NULL, w->back->udp_buff, w->env->scratch)) {
693 regional_free_all(w->env->scratch);
695 libworker_event_done_cb(q, LDNS_RCODE_NOERROR,
696 w->back->udp_buff, sec_status_insecure, NULL, 0);
699 /* process new query */
701 *async_id = q->querynum;
702 if(!mesh_new_callback(w->env->mesh, &qinfo, qflags, &edns,
703 w->back->udp_buff, qid, libworker_event_done_cb, q)) {
711 /** add result to the bg worker result queue */
/* Serializes the finished query's answer and queues it on rr_pipe to
 * the foreground. Thread mode: fill q->res in place under cfglock,
 * serialize (falling back to a UB_NOMEM answer if the msg copy fails),
 * and remove/delete q from the shared tree. Forked mode: serialize with
 * the packet directly and delete the local q. Serialization failure is
 * logged as out of memory. */
713 add_bg_result(struct libworker* w, struct ctx_query* q, sldns_buffer* pkt,
714 int err, char* reason, int was_ratelimited)
720 context_query_delete(q);
723 /* serialize and delete unneeded q */
724 if(w->is_bg_thread) {
725 lock_basic_lock(&w->ctx->cfglock);
727 q->res->why_bogus = strdup(reason);
728 q->res->was_ratelimited = was_ratelimited;
730 q->msg_len = sldns_buffer_remaining(pkt);
731 q->msg = memdup(sldns_buffer_begin(pkt), q->msg_len);
/* if the msg copy failed, report UB_NOMEM instead of err */
733 msg = context_serialize_answer(q, UB_NOMEM, NULL, &len);
735 msg = context_serialize_answer(q, err, NULL, &len);
738 msg = context_serialize_answer(q, err, NULL, &len);
740 lock_basic_unlock(&w->ctx->cfglock);
743 q->res->why_bogus = strdup(reason);
744 q->res->was_ratelimited = was_ratelimited;
745 msg = context_serialize_answer(q, err, pkt, &len);
746 (void)rbtree_delete(&w->ctx->queries, q->node.key);
748 context_query_delete(q);
752 log_err("out of memory for async answer");
755 if(!tube_queue_item(w->ctx->rr_pipe, msg, len)) {
756 log_err("out of memory for async answer");
/* Mesh completion callback for background queries. If the query was
 * cancelled or the worker wants to quit, just delete the query (under
 * cfglock in thread mode) and send nothing. Otherwise, when no reply
 * buffer is available, synthesize an error packet in the scratch buffer
 * and ship the result to the foreground via add_bg_result. */
762 libworker_bg_done_cb(void* arg, int rcode, sldns_buffer* buf, enum sec_status s,
763 char* why_bogus, int was_ratelimited)
765 struct ctx_query* q = (struct ctx_query*)arg;
767 if(q->cancelled || q->w->back->want_to_quit) {
768 if(q->w->is_bg_thread) {
770 struct ub_ctx* ctx = q->w->ctx;
771 lock_basic_lock(&ctx->cfglock);
772 (void)rbtree_delete(&ctx->queries, q->node.key);
774 context_query_delete(q);
775 lock_basic_unlock(&ctx->cfglock);
777 /* cancelled, do not give answer */
/* no reply buffer: encode an error packet into scratch */
782 buf = q->w->env->scratch_buffer;
785 error_encode(buf, rcode, NULL, 0, BIT_RD, NULL);
787 add_bg_result(q->w, q, buf, UB_NOERROR, why_bogus, was_ratelimited);
791 /** handle new query command for bg worker */
/* Deserializes a new query from the command pipe (looked up in the
 * shared tree under cfglock in thread mode, deserialized fresh in
 * forked mode), then mirrors the foreground path: fixed answers from
 * local zones / auth zones are sent back immediately via add_bg_result
 * as insecure NOERROR; otherwise the query is attached to the mesh
 * with libworker_bg_done_cb as its completion callback. */
793 handle_newq(struct libworker* w, uint8_t* buf, uint32_t len)
795 uint16_t qflags, qid;
796 struct query_info qinfo;
797 struct edns_data edns;
799 if(w->is_bg_thread) {
800 lock_basic_lock(&w->ctx->cfglock);
801 q = context_lookup_new_query(w->ctx, buf, len);
802 lock_basic_unlock(&w->ctx->cfglock);
804 q = context_deserialize_new_query(w->ctx, buf, len);
808 log_err("failed to deserialize newq");
811 if(!setup_qinfo_edns(w, q, &qinfo, &edns)) {
812 add_bg_result(w, q, NULL, UB_SYNTAX, NULL, 0);
817 /* see if there is a fixed answer */
818 sldns_buffer_write_u16_at(w->back->udp_buff, 0, qid);
819 sldns_buffer_write_u16_at(w->back->udp_buff, 2, qflags);
820 if(local_zones_answer(w->ctx->local_zones, w->env, &qinfo, &edns,
821 w->back->udp_buff, w->env->scratch, NULL, NULL, 0, NULL, 0,
822 NULL, 0, NULL, 0, NULL)) {
823 regional_free_all(w->env->scratch);
824 q->msg_security = sec_status_insecure;
825 add_bg_result(w, q, w->back->udp_buff, UB_NOERROR, NULL, 0);
829 if(w->ctx->env->auth_zones && auth_zones_answer(w->ctx->env->auth_zones,
830 w->env, &qinfo, &edns, NULL, w->back->udp_buff, w->env->scratch)) {
831 regional_free_all(w->env->scratch);
832 q->msg_security = sec_status_insecure;
833 add_bg_result(w, q, w->back->udp_buff, UB_NOERROR, NULL, 0);
838 /* process new query */
839 if(!mesh_new_callback(w->env->mesh, &qinfo, qflags, &edns,
840 w->back->udp_buff, qid, libworker_bg_done_cb, q)) {
841 add_bg_result(w, q, NULL, UB_NOMEM, NULL, 0);
/* Alloc-id cleanup hook (registered in libworker_setup): when the
 * allocation id space wraps, wipe the rrset and message caches so stale
 * ids cannot collide with new ones. */
846 void libworker_alloc_cleanup(void* arg)
848 struct libworker* w = (struct libworker*)arg;
849 slabhash_clear(&w->env->rrset_cache->table);
850 slabhash_clear(w->env->msg_cache);
/* module_env->send_query implementation for library workers: allocate
 * an outbound_entry in the query's region and hand the query to the
 * serviced-query machinery of the outside network, with
 * libworker_handle_service_reply as the reply callback. Returns the
 * entry (qsent recorded inside); null-handling on alloc/send failure is
 * elided from this listing. */
853 struct outbound_entry* libworker_send_query(struct query_info* qinfo,
854 uint16_t flags, int dnssec, int want_dnssec, int nocaps,
855 struct sockaddr_storage* addr, socklen_t addrlen, uint8_t* zone,
856 size_t zonelen, int ssl_upstream, char* tls_auth_name,
857 struct module_qstate* q)
859 struct libworker* w = (struct libworker*)q->env->worker;
860 struct outbound_entry* e = (struct outbound_entry*)regional_alloc(
861 q->region, sizeof(*e));
865 e->qsent = outnet_serviced_query(w->back, qinfo, flags, dnssec,
866 want_dnssec, nocaps, q->env->cfg->tcp_upstream, ssl_upstream,
867 tls_auth_name, addr, addrlen, zone, zonelen, q,
868 libworker_handle_service_reply, e, w->back->udp_buff, q->env);
/* comm_point reply callback (non-serviced path): sanity-check the
 * packet header (must be a response, query opcode, qdcount <= 1) and
 * report it to the mesh; malformed replies are reported as a timeout so
 * the module retries, network errors are passed through as-is. */
876 libworker_handle_reply(struct comm_point* c, void* arg, int error,
877 struct comm_reply* reply_info)
879 struct module_qstate* q = (struct module_qstate*)arg;
880 struct libworker* lw = (struct libworker*)q->env->worker;
/* stack outbound_entry; the mesh only reads it during this call */
881 struct outbound_entry e;
886 mesh_report_reply(lw->env->mesh, &e, reply_info, error);
890 if(!LDNS_QR_WIRE(sldns_buffer_begin(c->buffer))
891 || LDNS_OPCODE_WIRE(sldns_buffer_begin(c->buffer)) !=
893 || LDNS_QDCOUNT(sldns_buffer_begin(c->buffer)) > 1) {
894 /* error becomes timeout for the module as if this reply
896 mesh_report_reply(lw->env->mesh, &e, reply_info,
900 mesh_report_reply(lw->env->mesh, &e, reply_info, NETEVENT_NOERROR);
/* Serviced-query reply callback: same header sanity checks as
 * libworker_handle_reply, but the outbound_entry comes from the
 * serviced query (arg) rather than a stack temporary. Malformed replies
 * are reported to the mesh as timeouts; good ones as NETEVENT_NOERROR. */
905 libworker_handle_service_reply(struct comm_point* c, void* arg, int error,
906 struct comm_reply* reply_info)
908 struct outbound_entry* e = (struct outbound_entry*)arg;
909 struct libworker* lw = (struct libworker*)e->qstate->env->worker;
912 mesh_report_reply(lw->env->mesh, e, reply_info, error);
916 if(!LDNS_QR_WIRE(sldns_buffer_begin(c->buffer))
917 || LDNS_OPCODE_WIRE(sldns_buffer_begin(c->buffer)) !=
919 || LDNS_QDCOUNT(sldns_buffer_begin(c->buffer)) > 1) {
920 /* error becomes timeout for the module as if this reply
922 mesh_report_reply(lw->env->mesh, e, reply_info,
926 mesh_report_reply(lw->env->mesh, e, reply_info, NETEVENT_NOERROR);
930 /* --- fake callbacks for fptr_wlist to work --- */
/* These symbols exist only so the fptr_wlist function-pointer whitelist
 * links in the library build; the daemon's real implementations live
 * elsewhere. Their bodies are elided in this listing — in the library
 * they are never legitimately invoked. */
931 void worker_handle_control_cmd(struct tube* ATTR_UNUSED(tube),
932 uint8_t* ATTR_UNUSED(buffer), size_t ATTR_UNUSED(len),
933 int ATTR_UNUSED(error), void* ATTR_UNUSED(arg))
938 int worker_handle_request(struct comm_point* ATTR_UNUSED(c),
939 void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
940 struct comm_reply* ATTR_UNUSED(repinfo))
946 int worker_handle_reply(struct comm_point* ATTR_UNUSED(c),
947 void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
948 struct comm_reply* ATTR_UNUSED(reply_info))
954 int worker_handle_service_reply(struct comm_point* ATTR_UNUSED(c),
955 void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
956 struct comm_reply* ATTR_UNUSED(reply_info))
962 int remote_accept_callback(struct comm_point* ATTR_UNUSED(c),
963 void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
964 struct comm_reply* ATTR_UNUSED(repinfo))
970 int remote_control_callback(struct comm_point* ATTR_UNUSED(c),
971 void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
972 struct comm_reply* ATTR_UNUSED(repinfo))
978 void worker_sighandler(int ATTR_UNUSED(sig), void* ATTR_UNUSED(arg))
983 struct outbound_entry* worker_send_query(struct query_info* ATTR_UNUSED(qinfo),
984 uint16_t ATTR_UNUSED(flags), int ATTR_UNUSED(dnssec),
985 int ATTR_UNUSED(want_dnssec), int ATTR_UNUSED(nocaps),
986 struct sockaddr_storage* ATTR_UNUSED(addr), socklen_t ATTR_UNUSED(addrlen),
987 uint8_t* ATTR_UNUSED(zone), size_t ATTR_UNUSED(zonelen),
988 int ATTR_UNUSED(ssl_upstream), char* ATTR_UNUSED(tls_auth_name),
989 struct module_qstate* ATTR_UNUSED(q))
996 worker_alloc_cleanup(void* ATTR_UNUSED(arg))
1001 void worker_stat_timer_cb(void* ATTR_UNUSED(arg))
1006 void worker_probe_timer_cb(void* ATTR_UNUSED(arg))
1011 void worker_start_accept(void* ATTR_UNUSED(arg))
1016 void worker_stop_accept(void* ATTR_UNUSED(arg))
1021 int order_lock_cmp(const void* ATTR_UNUSED(e1), const void* ATTR_UNUSED(e2))
1028 codeline_cmp(const void* ATTR_UNUSED(a), const void* ATTR_UNUSED(b))
1034 int replay_var_compare(const void* ATTR_UNUSED(a), const void* ATTR_UNUSED(b))
1040 void remote_get_opt_ssl(char* ATTR_UNUSED(str), void* ATTR_UNUSED(arg))
1045 #ifdef UB_ON_WINDOWS
/* Windows-service stubs, compiled only for the service build */
1047 worker_win_stop_cb(int ATTR_UNUSED(fd), short ATTR_UNUSED(ev), void*
1053 wsvc_cron_cb(void* ATTR_UNUSED(arg))
1057 #endif /* UB_ON_WINDOWS */
/* dnstap I/O stubs for the fptr whitelist */
1060 void dtio_tap_callback(int ATTR_UNUSED(fd), short ATTR_UNUSED(ev),
1061 void* ATTR_UNUSED(arg))
1068 void dtio_mainfdcallback(int ATTR_UNUSED(fd), short ATTR_UNUSED(ev),
1069 void* ATTR_UNUSED(arg))