aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMark Johnston <markj@FreeBSD.org>2018-10-22 16:16:42 +0000
committerMark Johnston <markj@FreeBSD.org>2018-10-22 16:16:42 +0000
commit21744c825f73fc2fa3ad2f037f40839d1d3a3331 (patch)
treedc7202ebc39e5d23c7dcfb8e3a8e4c337476fe23
parentd3a4b0dabcd31c0c8a5bf049be688897e49489a0 (diff)
downloadsrc-21744c825f73fc2fa3ad2f037f40839d1d3a3331.tar.gz
src-21744c825f73fc2fa3ad2f037f40839d1d3a3331.zip
Don't import 0 into vmem quantum caches.
vmem uses UMA cache zones to implement the quantum cache. Since uma_zalloc() returns 0 (NULL) to signal an allocation failure, UMA should not be used to cache resource 0. Fix this by ensuring that 0 is never cached in UMA in the first place, and by modifying vmem_alloc() to fall back to a search of the free lists if the cache is depleted, rather than blocking in qc_import().

Reported by and discussed with: Brett Gutstein <bgutstein@rice.edu>
Reviewed by: alc
MFC after: 2 weeks
Differential Revision: https://reviews.freebsd.org/D17483
Notes
Notes: svn path=/head/; revision=339599
-rw-r--r--sys/kern/subr_vmem.c34
-rw-r--r--sys/sys/vmem.h5
2 files changed, 23 insertions, 16 deletions
diff --git a/sys/kern/subr_vmem.c b/sys/kern/subr_vmem.c
index 8529266f381c..c93460f96da7 100644
--- a/sys/kern/subr_vmem.c
+++ b/sys/kern/subr_vmem.c
@@ -504,6 +504,9 @@ bt_insfree(vmem_t *vm, bt_t *bt)
/*
* Import from the arena into the quantum cache in UMA.
+ *
+ * We use VMEM_ADDR_QCACHE_MIN instead of 0: uma_zalloc() returns 0 to indicate
+ * failure, so UMA can't be used to cache a resource with value 0.
*/
static int
qc_import(void *arg, void **store, int cnt, int domain, int flags)
@@ -512,19 +515,16 @@ qc_import(void *arg, void **store, int cnt, int domain, int flags)
vmem_addr_t addr;
int i;
+ KASSERT((flags & M_WAITOK) == 0, ("blocking allocation"));
+
qc = arg;
- if ((flags & VMEM_FITMASK) == 0)
- flags |= M_BESTFIT;
for (i = 0; i < cnt; i++) {
if (vmem_xalloc(qc->qc_vmem, qc->qc_size, 0, 0, 0,
- VMEM_ADDR_MIN, VMEM_ADDR_MAX, flags, &addr) != 0)
+ VMEM_ADDR_QCACHE_MIN, VMEM_ADDR_MAX, flags, &addr) != 0)
break;
store[i] = (void *)addr;
- /* Only guarantee one allocation. */
- flags &= ~M_WAITOK;
- flags |= M_NOWAIT;
}
- return i;
+ return (i);
}
/*
@@ -1123,15 +1123,20 @@ vmem_alloc(vmem_t *vm, vmem_size_t size, int flags, vmem_addr_t *addrp)
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "vmem_alloc");
if (size <= vm->vm_qcache_max) {
+ /*
+ * Resource 0 cannot be cached, so avoid a blocking allocation
+ * in qc_import() and give the vmem_xalloc() call below a chance
+ * to return 0.
+ */
qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift];
- *addrp = (vmem_addr_t)uma_zalloc(qc->qc_cache, flags);
- if (*addrp == 0)
- return (ENOMEM);
- return (0);
+ *addrp = (vmem_addr_t)uma_zalloc(qc->qc_cache,
+ (flags & ~M_WAITOK) | M_NOWAIT);
+ if (__predict_true(*addrp != 0))
+ return (0);
}
- return vmem_xalloc(vm, size, 0, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
- flags, addrp);
+ return (vmem_xalloc(vm, size, 0, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
+ flags, addrp));
}
int
@@ -1263,7 +1268,8 @@ vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
qcache_t *qc;
MPASS(size > 0);
- if (size <= vm->vm_qcache_max) {
+ if (size <= vm->vm_qcache_max &&
+ __predict_true(addr >= VMEM_ADDR_QCACHE_MIN)) {
qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift];
uma_zfree(qc->qc_cache, (void *)addr);
} else
diff --git a/sys/sys/vmem.h b/sys/sys/vmem.h
index e9dc54d154cb..e74d1e3fe803 100644
--- a/sys/sys/vmem.h
+++ b/sys/sys/vmem.h
@@ -41,8 +41,9 @@ typedef struct vmem vmem_t;
typedef uintptr_t vmem_addr_t;
typedef size_t vmem_size_t;
-#define VMEM_ADDR_MIN 0
-#define VMEM_ADDR_MAX (~(vmem_addr_t)0)
+#define VMEM_ADDR_MIN 0
+#define VMEM_ADDR_QCACHE_MIN 1
+#define VMEM_ADDR_MAX (~(vmem_addr_t)0)
typedef int (vmem_import_t)(void *, vmem_size_t, int, vmem_addr_t *);
typedef void (vmem_release_t)(void *, vmem_addr_t, vmem_size_t);