path: root/sys/kern
author     Alexander Motin <mav@FreeBSD.org>    2013-12-11 21:48:04 +0000
committer  Alexander Motin <mav@FreeBSD.org>    2013-12-11 21:48:04 +0000
commit     1cf78c85c5be4eee2c1765a03547379752bc4e76 (patch)
tree       207a5361c8933fef304eb2f0f69b7a9f05e1f0f2 /sys/kern
parent     a61a3b338fce6c01a91ad0e98e42b5ff22be98d6 (diff)
Create a separate free list for each of the first 32 possible allocation sizes.
With a 4K allocation quantum, that covers allocations up to 128K.

As memory fragmentation grows, these lists can reach quite large sizes (tens or hundreds of thousands of items). Keeping items of different sizes on one list can, in the worst case, require a full linear traversal, which may be very expensive. With each list holding items of a single size, unless the caller specifies alignment or boundary requirements (a rare case), the first item found on the list satisfies the request.

While running the SPEC NFS benchmark on top of ZFS on a 24-core machine with 84GB RAM, this change reduces the CPU time spent in vmem_xalloc() from 8%, and the lock-contention spinning around it from 20%, to invisible levels. All of that comes at the cost of just 26 more pointers per vmem instance.

If at some point the kernel starts to actively use KVA allocations with odd sizes above 128K, something may need to be done for the larger lists as well.
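To make the new size-to-list mapping concrete, here is a minimal standalone sketch (not part of the patch) that reproduces the ORDER2SIZE()/SIZE2ORDER() macros from the diff below and prints which free list a given size, expressed in quantums, lands on. The vmem_size_t typedef and the main() driver are stand-ins added purely for illustration; flsl() comes from <strings.h> as on FreeBSD.

/*
 * Sketch: sizes of 1..32 quantums each get a dedicated list (list = size - 1);
 * larger sizes fall back to power-of-two buckets, as in the patched code.
 */
#include <stdio.h>
#include <strings.h>			/* flsl() */

typedef unsigned long vmem_size_t;	/* stand-in for the kernel type */

#define	VMEM_OPTORDER	5
#define	VMEM_OPTVALUE	(1 << VMEM_OPTORDER)	/* 32 exact-size lists */

/* Same expressions as in the patch below. */
#define	ORDER2SIZE(order)	((order) < VMEM_OPTVALUE ? ((order) + 1) : \
    (vmem_size_t)1 << ((order) - (VMEM_OPTVALUE - VMEM_OPTORDER - 1)))
#define	SIZE2ORDER(size)	((size) <= VMEM_OPTVALUE ? ((size) - 1) : \
    (flsl(size) + (VMEM_OPTVALUE - VMEM_OPTORDER - 2)))

int
main(void)
{

	printf("size  1 -> freelist[%d]\n", SIZE2ORDER(1));	/* 0 */
	printf("size 32 -> freelist[%d]\n", SIZE2ORDER(32));	/* 31 */
	printf("size 33 -> freelist[%d]\n", SIZE2ORDER(33));	/* 31 */
	printf("size 64 -> freelist[%d]\n", SIZE2ORDER(64));	/* 32 */
	printf("freelist[32] starts at size %lu\n",
	    (unsigned long)ORDER2SIZE(32));			/* 64 */
	return (0);
}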
Notes:
    svn path=/head/; revision=259232
Diffstat (limited to 'sys/kern')
-rw-r--r--  sys/kern/subr_vmem.c  26
1 file changed, 17 insertions, 9 deletions
diff --git a/sys/kern/subr_vmem.c b/sys/kern/subr_vmem.c
index f3f3eecee93d..3b70738389bb 100644
--- a/sys/kern/subr_vmem.c
+++ b/sys/kern/subr_vmem.c
@@ -70,7 +70,10 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_param.h>
#include <vm/vm_pageout.h>
-#define VMEM_MAXORDER (sizeof(vmem_size_t) * NBBY)
+#define VMEM_OPTORDER 5
+#define VMEM_OPTVALUE (1 << VMEM_OPTORDER)
+#define VMEM_MAXORDER \
+ (VMEM_OPTVALUE - 1 + sizeof(vmem_size_t) * NBBY - VMEM_OPTORDER)
#define VMEM_HASHSIZE_MIN 16
#define VMEM_HASHSIZE_MAX 131072
@@ -200,8 +203,10 @@ static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
#define VMEM_CROSS_P(addr1, addr2, boundary) \
((((addr1) ^ (addr2)) & -(boundary)) != 0)
-#define ORDER2SIZE(order) ((vmem_size_t)1 << (order))
-#define SIZE2ORDER(size) ((int)flsl(size) - 1)
+#define ORDER2SIZE(order) ((order) < VMEM_OPTVALUE ? ((order) + 1) : \
+ (vmem_size_t)1 << ((order) - (VMEM_OPTVALUE - VMEM_OPTORDER - 1)))
+#define SIZE2ORDER(size) ((size) <= VMEM_OPTVALUE ? ((size) - 1) : \
+ (flsl(size) + (VMEM_OPTVALUE - VMEM_OPTORDER - 2)))
/*
* Maximum number of boundary tags that may be required to satisfy an
@@ -334,11 +339,14 @@ bt_free(vmem_t *vm, bt_t *bt)
/*
* freelist[0] ... [1, 1]
- * freelist[1] ... [2, 3]
- * freelist[2] ... [4, 7]
- * freelist[3] ... [8, 15]
+ * freelist[1] ... [2, 2]
* :
- * freelist[n] ... [(1 << n), (1 << (n + 1)) - 1]
+ * freelist[29] ... [30, 30]
+ * freelist[30] ... [31, 31]
+ * freelist[31] ... [32, 63]
+ * freelist[32] ... [64, 127]
+ * :
+ * freelist[n] ... [(1 << (n - 26)), (1 << (n - 25)) - 1]
* :
*/
@@ -979,6 +987,7 @@ vmem_init(vmem_t *vm, const char *name, vmem_addr_t base, vmem_size_t size,
int i;
MPASS(quantum > 0);
+ MPASS((quantum & (quantum - 1)) == 0);
bzero(vm, sizeof(*vm));
@@ -988,8 +997,7 @@ vmem_init(vmem_t *vm, const char *name, vmem_addr_t base, vmem_size_t size,
LIST_INIT(&vm->vm_freetags);
strlcpy(vm->vm_name, name, sizeof(vm->vm_name));
vm->vm_quantum_mask = quantum - 1;
- vm->vm_quantum_shift = SIZE2ORDER(quantum);
- MPASS(ORDER2SIZE(vm->vm_quantum_shift) == quantum);
+ vm->vm_quantum_shift = flsl(quantum) - 1;
vm->vm_nbusytag = 0;
vm->vm_size = 0;
vm->vm_inuse = 0;
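For reference, here is a quick standalone check (assuming an LP64 kernel, where sizeof(vmem_size_t) * NBBY is 64) of where the commit message's "26 more pointers per vmem instance" comes from, comparing the old and new VMEM_MAXORDER expressions from the first hunk above:

/*
 * Sketch only: NBBY and the vmem_size_t typedef are stand-ins for the
 * kernel definitions; the figures assume an 8-byte vmem_size_t.
 */
#include <assert.h>

#define	NBBY		8
#define	VMEM_OPTORDER	5
#define	VMEM_OPTVALUE	(1 << VMEM_OPTORDER)

typedef unsigned long vmem_size_t;		/* 8 bytes on LP64 */

#define	OLD_MAXORDER	(sizeof(vmem_size_t) * NBBY)	/* 64 lists */
#define	NEW_MAXORDER	\
	(VMEM_OPTVALUE - 1 + sizeof(vmem_size_t) * NBBY - VMEM_OPTORDER)

int
main(void)
{

	assert(OLD_MAXORDER == 64);
	assert(NEW_MAXORDER == 90);
	/* 26 extra free-list heads per vmem instance. */
	assert(NEW_MAXORDER - OLD_MAXORDER == 26);
	return (0);
}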