about summary refs log tree commit diff
diff options
context:
space:
mode:
authorJeff Roberson <jeff@FreeBSD.org>2019-11-28 07:49:25 +0000
committerJeff Roberson <jeff@FreeBSD.org>2019-11-28 07:49:25 +0000
commit584061b4807c5e1b03c10107730ab45e9f5e65d7 (patch)
tree3c9049d5da4b67106b1ba3f26b3ceb0526e6c603
parent3c5a4af6e943231ea75d58f6cf85b785a156b7b9 (diff)
downloadsrc-584061b4807c5e1b03c10107730ab45e9f5e65d7.tar.gz
src-584061b4807c5e1b03c10107730ab45e9f5e65d7.zip
Garbage collect the mostly unused us_keg field. Use appropriately named
union members in vm_page.h to store the zone and slab. Remove some nearby dead code.

Reviewed by: markj
Differential Revision: https://reviews.freebsd.org/D22564
Notes
Notes: svn path=/head/; revision=355169
-rw-r--r--sys/kern/kern_malloc.c20
-rw-r--r--sys/vm/uma_core.c56
-rw-r--r--sys/vm/uma_int.h20
-rw-r--r--sys/vm/vm_page.h5
4 files changed, 42 insertions(+), 59 deletions(-)
diff --git a/sys/kern/kern_malloc.c b/sys/kern/kern_malloc.c
index 500dc8264390..de2a043e5911 100644
--- a/sys/kern/kern_malloc.c
+++ b/sys/kern/kern_malloc.c
@@ -738,6 +738,7 @@ free_dbg(void **addrp, struct malloc_type *mtp)
void
free(void *addr, struct malloc_type *mtp)
{
+ uma_zone_t zone;
uma_slab_t slab;
u_long size;
@@ -749,17 +750,17 @@ free(void *addr, struct malloc_type *mtp)
if (addr == NULL)
return;
- slab = vtoslab((vm_offset_t)addr & (~UMA_SLAB_MASK));
+ vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
if (slab == NULL)
panic("free: address %p(%p) has not been allocated.\n",
addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));
if (!(slab->us_flags & UMA_SLAB_MALLOC)) {
- size = slab->us_keg->uk_size;
+ size = zone->uz_size;
#ifdef INVARIANTS
free_save_type(addr, mtp, size);
#endif
- uma_zfree_arg(LIST_FIRST(&slab->us_keg->uk_zones), addr, slab);
+ uma_zfree_arg(zone, addr, slab);
} else {
size = slab->us_size;
uma_large_free(slab);
@@ -770,6 +771,7 @@ free(void *addr, struct malloc_type *mtp)
void
free_domain(void *addr, struct malloc_type *mtp)
{
+ uma_zone_t zone;
uma_slab_t slab;
u_long size;
@@ -782,18 +784,17 @@ free_domain(void *addr, struct malloc_type *mtp)
if (addr == NULL)
return;
- slab = vtoslab((vm_offset_t)addr & (~UMA_SLAB_MASK));
+ vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
if (slab == NULL)
panic("free_domain: address %p(%p) has not been allocated.\n",
addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));
if (!(slab->us_flags & UMA_SLAB_MALLOC)) {
- size = slab->us_keg->uk_size;
+ size = zone->uz_size;
#ifdef INVARIANTS
free_save_type(addr, mtp, size);
#endif
- uma_zfree_domain(LIST_FIRST(&slab->us_keg->uk_zones),
- addr, slab);
+ uma_zfree_domain(zone, addr, slab);
} else {
size = slab->us_size;
uma_large_free(slab);
@@ -807,6 +808,7 @@ free_domain(void *addr, struct malloc_type *mtp)
void *
realloc(void *addr, size_t size, struct malloc_type *mtp, int flags)
{
+ uma_zone_t zone;
uma_slab_t slab;
unsigned long alloc;
void *newaddr;
@@ -834,7 +836,7 @@ realloc(void *addr, size_t size, struct malloc_type *mtp, int flags)
slab = NULL;
alloc = redzone_get_size(addr);
#else
- slab = vtoslab((vm_offset_t)addr & ~(UMA_SLAB_MASK));
+ vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
/* Sanity check */
KASSERT(slab != NULL,
@@ -842,7 +844,7 @@ realloc(void *addr, size_t size, struct malloc_type *mtp, int flags)
/* Get the size of the original block */
if (!(slab->us_flags & UMA_SLAB_MALLOC))
- alloc = slab->us_keg->uk_size;
+ alloc = zone->uz_size;
else
alloc = slab->us_size;
diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
index 4a240969024b..0fa1393b1ca0 100644
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
- * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson <jeff@FreeBSD.org>
+ * Copyright (c) 2002-2019 Jeffrey Roberson <jeff@FreeBSD.org>
* Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
* Copyright (c) 2004-2006 Robert N. M. Watson
* All rights reserved.
@@ -275,7 +275,6 @@ static uma_bucket_t bucket_alloc(uma_zone_t zone, void *, int);
static void bucket_free(uma_zone_t zone, uma_bucket_t, void *);
static void bucket_zone_drain(void);
static uma_bucket_t zone_alloc_bucket(uma_zone_t, void *, int, int);
-static uma_slab_t zone_fetch_slab(uma_zone_t, uma_keg_t, int, int);
static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab);
static void slab_free_item(uma_zone_t zone, uma_slab_t slab, void *item);
static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
@@ -1210,9 +1209,9 @@ keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int domain, int flags,
if (keg->uk_flags & UMA_ZONE_VTOSLAB)
for (i = 0; i < keg->uk_ppera; i++)
- vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);
+ vsetzoneslab((vm_offset_t)mem + (i * PAGE_SIZE),
+ zone, slab);
- slab->us_keg = keg;
slab->us_data = mem;
slab->us_freecount = keg->uk_ipers;
slab->us_flags = sflags;
@@ -3017,10 +3016,8 @@ restart:
for (;;) {
slab = keg_fetch_free_slab(keg, domain, rr, flags);
- if (slab != NULL) {
- MPASS(slab->us_keg == keg);
+ if (slab != NULL)
return (slab);
- }
/*
* M_NOVM means don't ask at all!
@@ -3039,7 +3036,6 @@ restart:
* at least one item.
*/
if (slab) {
- MPASS(slab->us_keg == keg);
dom = &keg->uk_domain[slab->us_domain];
LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
return (slab);
@@ -3062,33 +3058,11 @@ restart:
* fail.
*/
if ((slab = keg_fetch_free_slab(keg, domain, rr, flags)) != NULL) {
- MPASS(slab->us_keg == keg);
return (slab);
}
return (NULL);
}
-static uma_slab_t
-zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int domain, int flags)
-{
- uma_slab_t slab;
-
- if (keg == NULL) {
- keg = zone->uz_keg;
- KEG_LOCK(keg);
- }
-
- for (;;) {
- slab = keg_fetch_slab(keg, zone, domain, flags);
- if (slab)
- return (slab);
- if (flags & (M_NOWAIT | M_NOVM))
- break;
- }
- KEG_UNLOCK(keg);
- return (NULL);
-}
-
static void *
slab_alloc_item(uma_keg_t keg, uma_slab_t slab)
{
@@ -3096,7 +3070,6 @@ slab_alloc_item(uma_keg_t keg, uma_slab_t slab)
void *item;
uint8_t freei;
- MPASS(keg == slab->us_keg);
KEG_LOCK_ASSERT(keg);
freei = BIT_FFS(SLAB_SETSIZE, &slab->us_free) - 1;
@@ -3126,12 +3099,12 @@ zone_import(uma_zone_t zone, void **bucket, int max, int domain, int flags)
int i;
slab = NULL;
- keg = NULL;
+ keg = zone->uz_keg;
+ KEG_LOCK(keg);
/* Try to keep the buckets totally full */
for (i = 0; i < max; ) {
- if ((slab = zone_fetch_slab(zone, keg, domain, flags)) == NULL)
+ if ((slab = keg_fetch_slab(keg, zone, domain, flags)) == NULL)
break;
- keg = slab->us_keg;
#ifdef NUMA
stripe = howmany(max, vm_ndomains);
#endif
@@ -3157,8 +3130,7 @@ zone_import(uma_zone_t zone, void **bucket, int max, int domain, int flags)
flags &= ~M_WAITOK;
flags |= M_NOWAIT;
}
- if (slab != NULL)
- KEG_UNLOCK(keg);
+ KEG_UNLOCK(keg);
return i;
}
@@ -3599,7 +3571,6 @@ slab_free_item(uma_zone_t zone, uma_slab_t slab, void *item)
keg = zone->uz_keg;
MPASS(zone->uz_lockptr == &keg->uk_lock);
KEG_LOCK_ASSERT(keg);
- MPASS(keg == slab->us_keg);
dom = &keg->uk_domain[slab->us_domain];
@@ -3642,10 +3613,8 @@ zone_release(uma_zone_t zone, void **bucket, int cnt)
mem += keg->uk_pgoff;
slab = (uma_slab_t)mem;
}
- } else {
+ } else
slab = vtoslab((vm_offset_t)item);
- MPASS(slab->us_keg == keg);
- }
slab_free_item(zone, slab, item);
}
KEG_UNLOCK(keg);
@@ -3996,7 +3965,6 @@ uma_prealloc(uma_zone_t zone, int items)
slab = keg_alloc_slab(keg, zone, domain, M_WAITOK,
aflags);
if (slab != NULL) {
- MPASS(slab->us_keg == keg);
dom = &keg->uk_domain[slab->us_domain];
LIST_INSERT_HEAD(&dom->ud_free_slab, slab,
us_link);
@@ -4134,7 +4102,7 @@ uma_large_malloc_domain(vm_size_t size, int domain, int wait)
DOMAINSET_FIXED(domain);
addr = kmem_malloc_domainset(policy, size, wait);
if (addr != 0) {
- vsetslab(addr, slab);
+ vsetzoneslab(addr, NULL, slab);
slab->us_data = (void *)addr;
slab->us_flags = UMA_SLAB_KERNEL | UMA_SLAB_MALLOC;
slab->us_size = size;
@@ -4546,7 +4514,7 @@ uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item)
panic("uma: item %p did not belong to zone %s\n",
item, zone->uz_name);
}
- keg = slab->us_keg;
+ keg = zone->uz_keg;
freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
if (BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree))
@@ -4574,7 +4542,7 @@ uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item)
panic("uma: Freed item %p did not belong to zone %s\n",
item, zone->uz_name);
}
- keg = slab->us_keg;
+ keg = zone->uz_keg;
freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
if (freei >= keg->uk_ipers)
diff --git a/sys/vm/uma_int.h b/sys/vm/uma_int.h
index aa8405aafe52..d8a8e3f6019d 100644
--- a/sys/vm/uma_int.h
+++ b/sys/vm/uma_int.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
- * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson <jeff@FreeBSD.org>
+ * Copyright (c) 2002-2019 Jeffrey Roberson <jeff@FreeBSD.org>
* Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
* All rights reserved.
*
@@ -281,7 +281,6 @@ BITSET_DEFINE(slabbits, SLAB_SETSIZE);
* store and subdivides it into individually allocatable items.
*/
struct uma_slab {
- uma_keg_t us_keg; /* Keg we live in */
union {
LIST_ENTRY(uma_slab) _us_link; /* slabs in zone */
unsigned long _us_size; /* Size of allocation */
@@ -478,16 +477,27 @@ vtoslab(vm_offset_t va)
vm_page_t p;
p = PHYS_TO_VM_PAGE(pmap_kextract(va));
- return ((uma_slab_t)p->plinks.s.pv);
+ return (p->plinks.uma.slab);
}
static __inline void
-vsetslab(vm_offset_t va, uma_slab_t slab)
+vtozoneslab(vm_offset_t va, uma_zone_t *zone, uma_slab_t *slab)
{
vm_page_t p;
p = PHYS_TO_VM_PAGE(pmap_kextract(va));
- p->plinks.s.pv = slab;
+ *slab = p->plinks.uma.slab;
+ *zone = p->plinks.uma.zone;
+}
+
+static __inline void
+vsetzoneslab(vm_offset_t va, uma_zone_t zone, uma_slab_t slab)
+{
+ vm_page_t p;
+
+ p = PHYS_TO_VM_PAGE(pmap_kextract(va));
+ p->plinks.uma.slab = slab;
+ p->plinks.uma.zone = zone;
}
/*
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index 788c029dec8e..d762c37fcad8 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -220,12 +220,15 @@ struct vm_page {
TAILQ_ENTRY(vm_page) q; /* page queue or free list (Q) */
struct {
SLIST_ENTRY(vm_page) ss; /* private slists */
- void *pv;
} s;
struct {
u_long p;
u_long v;
} memguard;
+ struct {
+ void *slab;
+ void *zone;
+ } uma;
} plinks;
TAILQ_ENTRY(vm_page) listq; /* pages in same object (O) */
vm_object_t object; /* which object am I in (O) */