author    Gleb Smirnoff <glebius@FreeBSD.org>  2019-01-09 01:11:19 +0000
committer Gleb Smirnoff <glebius@FreeBSD.org>  2019-01-09 01:11:19 +0000
commit    a68cc388790587b330a01380a0c1864fb9ff3f1e (patch)
tree      f5bb270ef868e474632d85d470c6f571f63fbe1a /sys/net/altq/altq_subr.c
parent    086566c1c1b306c7eda0485e11c870bfaed6c721 (diff)
Mechanical cleanup of epoch(9) usage in network stack.
- Remove macros that covertly create an epoch_tracker on the thread stack. Such macros are quite unsafe, e.g. they produce buggy code if the same macro is used in nested scopes. Explicitly declare the epoch_tracker always.
- Unmask the interface list IFNET_RLOCK_NOSLEEP(), interface address list IF_ADDR_RLOCK() and interface AF-specific data IF_AFDATA_RLOCK() read locking macros to what they actually are: the net_epoch. Keeping them as is is very misleading. They are all named FOO_RLOCK(), while they no longer have lock semantics. They now allow recursion and, more importantly, they no longer guarantee protection against their companion WLOCK macros. Note: INP_HASH_RLOCK() has the same problems, but is not touched by this commit.

This is a non-functional, mechanical change. The only functionally changed functions are ni6_addrs() and ni6_store_addrs(), where we no longer enter the epoch recursively.

Discussed with: jtl, gallatin
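To make the first point concrete, here is a minimal user-space sketch of the shadowing hazard. The macro names (COVERT_RLOCK/COVERT_RUNLOCK) and the stub epoch_enter()/epoch_exit() functions are hypothetical, standing in for the removed kernel macros and the real epoch_enter_preempt()/epoch_exit_preempt(); only the shadowing behavior is the point.

#include <stdio.h>

/*
 * Stand-ins for the kernel's epoch(9) entry points, just to make the
 * shadowing visible in a runnable program.
 */
struct epoch_tracker { int id; };

static int next_id;

static void
epoch_enter(struct epoch_tracker *et)
{
	et->id = ++next_id;
	printf("enter, tracker %d\n", et->id);
}

static void
epoch_exit(struct epoch_tracker *et)
{
	printf("exit,  tracker %d\n", et->id);
}

/* Old style: the macro covertly declares the tracker on the stack. */
#define COVERT_RLOCK()		struct epoch_tracker _et; epoch_enter(&_et)
#define COVERT_RUNLOCK()	epoch_exit(&_et)

int
main(void)
{
	COVERT_RLOCK();			/* outer _et */
	{
		COVERT_RLOCK();		/* inner _et shadows the outer one */
		COVERT_RUNLOCK();	/* exits the inner tracker */
		COVERT_RUNLOCK();	/* intended for the outer entry, but
					 * _et still names the inner tracker */
	}
	/* The outer tracker is never passed to epoch_exit(). */
	return (0);
}

Run, this prints "enter, tracker 1", "enter, tracker 2", then "exit, tracker 2" twice: the outer tracker is never exited. Declaring the tracker explicitly, as the diff below does with struct epoch_tracker et and NET_EPOCH_ENTER(et)/NET_EPOCH_EXIT(et), removes the ambiguity because each exit names its tracker.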
Notes: svn path=/head/; revision=342872
Diffstat (limited to 'sys/net/altq/altq_subr.c')
-rw-r--r--  sys/net/altq/altq_subr.c  9
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/sys/net/altq/altq_subr.c b/sys/net/altq/altq_subr.c
index af919d5f499d..4e840a5b2e32 100644
--- a/sys/net/altq/altq_subr.c
+++ b/sys/net/altq/altq_subr.c
@@ -410,11 +410,11 @@ tbr_timeout(arg)
 {
 	VNET_ITERATOR_DECL(vnet_iter);
 	struct ifnet *ifp;
-	int active, s;
+	struct epoch_tracker et;
+	int active;
 
 	active = 0;
-	s = splnet();
-	IFNET_RLOCK_NOSLEEP();
+	NET_EPOCH_ENTER(et);
 	VNET_LIST_RLOCK_NOSLEEP();
 	VNET_FOREACH(vnet_iter) {
 		CURVNET_SET(vnet_iter);
@@ -431,8 +431,7 @@ tbr_timeout(arg)
 		CURVNET_RESTORE();
 	}
 	VNET_LIST_RUNLOCK_NOSLEEP();
-	IFNET_RUNLOCK_NOSLEEP();
-	splx(s);
+	NET_EPOCH_EXIT(et);
 	if (active > 0)
 		CALLOUT_RESET(&tbr_callout, 1, tbr_timeout, (void *)0);
 	else