aboutsummaryrefslogtreecommitdiff
path: root/sys/netipsec
diff options
context:
space:
mode:
authorMateusz Guzik <mjg@FreeBSD.org>2018-06-08 21:40:03 +0000
committerMateusz Guzik <mjg@FreeBSD.org>2018-06-08 21:40:03 +0000
commit4e180881ae4a325cfaeef336aa1a16b0d5bbae67 (patch)
treec5a4fb99800ba3d30a7f2b689c21dc1e6ff74ceb /sys/netipsec
parent4f63fbc955b7de6332b3c5507f57389dbd6e75f8 (diff)
downloadsrc-4e180881ae4a325cfaeef336aa1a16b0d5bbae67.tar.gz
src-4e180881ae4a325cfaeef336aa1a16b0d5bbae67.zip
uma: implement provisional api for per-cpu zones
Per-cpu zone allocations are very rarely done compared to regular zones. The intent is to avoid pessimizing the latter case with per-cpu specific code. In particular, contrary to the claim in r334824, M_ZERO is sometimes being used for such zones. But the zeroing method is completely different, and branching on it in the fast path for regular zones is a waste of time.
Notes
Notes: svn path=/head/; revision=334858
Diffstat (limited to 'sys/netipsec')
-rw-r--r--sys/netipsec/key.c4
1 file changed, 2 insertions, 2 deletions
diff --git a/sys/netipsec/key.c b/sys/netipsec/key.c
index 96ef209518d1..0b90d46d4d0b 100644
--- a/sys/netipsec/key.c
+++ b/sys/netipsec/key.c
@@ -2957,7 +2957,7 @@ key_newsav(const struct sadb_msghdr *mhp, struct secasindex *saidx,
goto done;
}
mtx_init(sav->lock, "ipsec association", NULL, MTX_DEF);
- sav->lft_c = uma_zalloc(V_key_lft_zone, M_NOWAIT);
+ sav->lft_c = uma_zalloc_pcpu(V_key_lft_zone, M_NOWAIT);
if (sav->lft_c == NULL) {
*errp = ENOBUFS;
goto done;
@@ -3049,7 +3049,7 @@ done:
free(sav->lock, M_IPSEC_MISC);
}
if (sav->lft_c != NULL)
- uma_zfree(V_key_lft_zone, sav->lft_c);
+ uma_zfree_pcpu(V_key_lft_zone, sav->lft_c);
free(sav, M_IPSEC_SA), sav = NULL;
}
if (sah != NULL)