Diffstat (limited to 'contrib/unbound/iterator/iterator.c')
-rw-r--r--   contrib/unbound/iterator/iterator.c   36
1 file changed, 35 insertions(+), 1 deletion(-)
diff --git a/contrib/unbound/iterator/iterator.c b/contrib/unbound/iterator/iterator.c
index 25e5cfee4645..da9b7990c506 100644
--- a/contrib/unbound/iterator/iterator.c
+++ b/contrib/unbound/iterator/iterator.c
@@ -1218,6 +1218,15 @@ generate_dnskey_prefetch(struct module_qstate* qstate,
(qstate->query_flags&BIT_RD) && !(qstate->query_flags&BIT_CD)){
return;
}
+ /* we do not generate this prefetch when the query list is full;
+ * the DNSKEY is fetched, if needed, when the validator wants it.
+ * At that time the validator spawns the lookup and waits for it.
+ * This means only one state uses cpu and a socket at a time, the
+ * spawned one, while this one waits, instead of several at once
+ * as there would be if we created the lookup here. This keeps
+ * the total load down, and the query still resolves. */
+ if(mesh_jostle_exceeded(qstate->env->mesh))
+ return;
/* if the DNSKEY is in the cache this lookup will stop quickly */
log_nametypeclass(VERB_ALGO, "schedule dnskey prefetch",
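The helper mesh_jostle_exceeded() that these hunks call is defined in services/mesh.c, not in this file. A minimal sketch of the check it is expected to perform, assuming the mesh_area fields all.count and max_reply_states (names taken from the mesh code, not from this diff):

	/* Sketch: report whether the mesh query list is considered full.
	 * When it returns nonzero, the iterator skips or postpones the
	 * optional lookups added in this change. */
	int mesh_jostle_exceeded(struct mesh_area* mesh)
	{
		/* all.count is the number of query states currently in the
		 * mesh; max_reply_states is the limit derived from the
		 * num-queries-per-thread configuration. */
		return mesh->all.count >= mesh->max_reply_states;
	}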
@@ -1911,6 +1920,14 @@ query_for_targets(struct module_qstate* qstate, struct iter_qstate* iq,
return 0;
}
query_count++;
+ /* If the mesh query list is full, exit the loop here.
+ * This makes the routine spawn one query at a time,
+ * so there is no increase in query state load: the
+ * spawned state uses cpu and a socket while this
+ * state waits for it. On the next call, further
+ * targets can be looked up. */
+ if(mesh_jostle_exceeded(qstate->env->mesh))
+ break;
}
/* Send the A request. */
if(ie->supports_ipv4 &&
@@ -1925,6 +1942,9 @@ query_for_targets(struct module_qstate* qstate, struct iter_qstate* iq,
return 0;
}
query_count++;
+ /* If the mesh query list is full, exit the loop. */
+ if(mesh_jostle_exceeded(qstate->env->mesh))
+ break;
}
/* mark this target as in progress. */
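Both hunks above break out of the target-address loops in query_for_targets() once the mesh is full. In effect the routine spawns one address lookup per call under load; the waiting state is re-entered when that lookup finishes and can then issue the next one. A simplified sketch of the resulting loop shape, with the query generation reduced to a hypothetical generate_addr_query() placeholder (the real code builds the AAAA and A subqueries inline):

	/* Sketch of the spawn-one-at-a-time pattern (simplified). */
	while((ns = next_missing_target(iq)) != NULL) { /* hypothetical iteration */
		if(!generate_addr_query(qstate, iq, ns))    /* hypothetical helper */
			return 0;                           /* failed to spawn */
		query_count++;
		/* Under load, stop after one spawned lookup; this state now
		 * waits for it, and the remaining targets are picked up on
		 * the next call into query_for_targets(). */
		if(mesh_jostle_exceeded(qstate->env->mesh))
			break;
	}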
@@ -2085,6 +2105,15 @@ processLastResort(struct module_qstate* qstate, struct iter_qstate* iq,
}
ns->done_pside6 = 1;
query_count++;
+ if(mesh_jostle_exceeded(qstate->env->mesh)) {
+ /* Wait for the lookup; do not spawn multiple
+ * lookups at a time. */
+ verbose(VERB_ALGO, "try parent-side glue lookup");
+ iq->num_target_queries += query_count;
+ target_count_increase(iq, query_count);
+ qstate->ext_state[id] = module_wait_subquery;
+ return 0;
+ }
}
if(ie->supports_ipv4 && !ns->done_pside4) {
/* Send the A request. */
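In processLastResort() the reaction to a full mesh is slightly different: the state accounts for the one glue lookup it has already spawned and then parks itself. The module_wait_subquery value and the ext_state array come from the module interface in util/module.h; the sketch below only restates the control flow of the hunk above with the fall-through made explicit:

	/* Sketch (simplified): under load, wait for the spawned AAAA glue
	 * lookup instead of also spawning the A lookup right away. */
	if(mesh_jostle_exceeded(qstate->env->mesh)) {
		iq->num_target_queries += query_count;        /* account for it */
		target_count_increase(iq, query_count);
		qstate->ext_state[id] = module_wait_subquery; /* park this state */
		return 0;  /* resumed by the mesh when the subquery is done */
	}
	/* otherwise fall through and also issue the A-record glue lookup */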
@@ -2560,7 +2589,12 @@ processQueryTargets(struct module_qstate* qstate, struct iter_qstate* iq,
if(iq->depth < ie->max_dependency_depth
&& iq->num_target_queries == 0
&& (!iq->target_count || iq->target_count[TARGET_COUNT_NX]==0)
- && iq->sent_count < TARGET_FETCH_STOP) {
+ && iq->sent_count < TARGET_FETCH_STOP
+ /* if the mesh query list is full, then do not waste cpu
+ * and sockets to fetch promiscuous targets. They can be
+ * looked up when needed. */
+ && !mesh_jostle_exceeded(qstate->env->mesh)
+ ) {
tf_policy = ie->target_fetch_policy[iq->depth];
}
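The condition added here gates promiscuous target fetching, which is driven by the target-fetch-policy option in unbound.conf (an array with one entry per dependency depth). When the mesh is full, tf_policy keeps its initial value of 0, meaning nameserver addresses are only looked up when actually needed. A rough sketch of the effect, based on the surrounding code in processQueryTargets() (not shown in this diff):

	/* Sketch: effect of the guard on promiscuous target fetching. */
	int tf_policy = 0;  /* 0 = fetch nameserver addresses only on demand */
	if(iq->depth < ie->max_dependency_depth
		/* ... other conditions from the hunk above ... */
		&& !mesh_jostle_exceeded(qstate->env->mesh)) {
		/* number of extra address fetches allowed at this depth,
		 * taken from the target-fetch-policy configuration */
		tf_policy = ie->target_fetch_policy[iq->depth];
	}

With the default target-fetch-policy of "3 2 1 0 0", the guard only changes behavior at the first few dependency depths, where extra fetches would otherwise be issued.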