author     Jake Burkholder <jake@FreeBSD.org>    2002-08-10 22:14:16 +0000
committer  Jake Burkholder <jake@FreeBSD.org>    2002-08-10 22:14:16 +0000
commit     5aebb4029139c2675b97d1373ef2a379405d7c7c (patch)
tree       7349581d304c20934c8c25ed2dcc7c74735a7e7e /sys/sparc64
parent     d64915d6e373efc67190d1220865e76c5eaf046f (diff)
Auto size available kernel virtual address space based on physical memory
size.  This avoids blowing out kva in kmeminit() on large memory machines
(4 gigs or more).

Reviewed by:	tmm
Notes:
    svn path=/head/; revision=101653
Diffstat (limited to 'sys/sparc64')
-rw-r--r--  sys/sparc64/include/tsb.h        9
-rw-r--r--  sys/sparc64/include/vmparam.h   25
-rw-r--r--  sys/sparc64/sparc64/exception.S 48
-rw-r--r--  sys/sparc64/sparc64/genassym.c   1
-rw-r--r--  sys/sparc64/sparc64/pmap.c      87
-rw-r--r--  sys/sparc64/sparc64/tsb.c        2
6 files changed, 113 insertions(+), 59 deletions(-)
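
The heart of the change is in pmap_bootstrap(): the kernel virtual address
range and the kernel TSB size are now computed at boot from the physical
memory size, replacing the compile-time KVA_PAGES constant. A minimal sketch
of the new sizing arithmetic, assuming sparc64's 8K base pages (PAGE_SHIFT =
13) and 16-byte ttes (TTE_SHIFT = 4); those concrete values are restated
here as assumptions, not part of the patch:

	/* Round the KVA range up to what whole 4M pages of TSB can map. */
	virtsz = roundup(physsz, PAGE_SIZE_4M << (PAGE_SHIFT - TTE_SHIFT));
	vm_max_kernel_address = VM_MIN_KERNEL_ADDRESS + virtsz;

	/* One 16-byte tte per 8K page: the TSB is 1/512 of the range. */
	tsb_kernel_size = virtsz >> (PAGE_SHIFT - TTE_SHIFT);

	/* Index mask over the number of ttes in the TSB. */
	tsb_kernel_mask = (tsb_kernel_size >> TTE_SHIFT) - 1;

With 4 gigabytes of RAM this yields a 4G KVA range backed by an 8M TSB;
machines with up to 2G of RAM keep the previous default of a 2G range and a
single 4M TSB page.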
diff --git a/sys/sparc64/include/tsb.h b/sys/sparc64/include/tsb.h
index a12b6349554f..20db029aff9d 100644
--- a/sys/sparc64/include/tsb.h
+++ b/sys/sparc64/include/tsb.h
@@ -43,12 +43,9 @@
(TSB_BSHIFT - TSB_BUCKET_SHIFT - TTE_SHIFT)
#define TSB_BUCKET_MASK ((1 << TSB_BUCKET_ADDRESS_BITS) - 1)
-#define TSB_KERNEL_SIZE \
- ((KVA_PAGES * PAGE_SIZE_4M) / sizeof(struct tte))
-#define TSB_KERNEL_MASK (TSB_KERNEL_SIZE - 1)
-#define TSB_KERNEL_VA_MASK (TSB_KERNEL_MASK << TTE_SHIFT)
-
extern struct tte *tsb_kernel;
+extern vm_size_t tsb_kernel_mask;
+extern vm_size_t tsb_kernel_size;
extern vm_offset_t tsb_kernel_phys;
static __inline struct tte *
@@ -66,7 +63,7 @@ tsb_vtobucket(pmap_t pm, vm_offset_t va)
static __inline struct tte *
tsb_kvpntotte(vm_offset_t vpn)
{
- return (&tsb_kernel[vpn & TSB_KERNEL_MASK]);
+ return (&tsb_kernel[vpn & tsb_kernel_mask]);
}
static __inline struct tte *
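
With the mask a runtime variable, a kernel TSB lookup keeps its old shape:
index by virtual page number, masked by tsb_kernel_mask rather than the
removed constant. A hedged sketch of a full lookup built on the
tsb_kvpntotte() shown above (kernel_tte_for() is a hypothetical name, not
part of the header):

	static __inline struct tte *
	kernel_tte_for(vm_offset_t va)
	{
		vm_offset_t vpn;

		vpn = va >> PAGE_SHIFT;		/* virtual page number */
		return (tsb_kvpntotte(vpn));	/* &tsb_kernel[vpn & tsb_kernel_mask] */
	}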
diff --git a/sys/sparc64/include/vmparam.h b/sys/sparc64/include/vmparam.h
index ec080e6c9afa..7b916bc952fa 100644
--- a/sys/sparc64/include/vmparam.h
+++ b/sys/sparc64/include/vmparam.h
@@ -88,9 +88,9 @@
* that if this moves above the va hole, we will have to deal with sign
* extension of virtual addresses.
*/
-#define VM_MAXUSER_ADDRESS ((vm_offset_t)0x7fe00000000)
+#define VM_MAXUSER_ADDRESS (0x7fe00000000UL)
-#define VM_MIN_ADDRESS ((vm_offset_t)0)
+#define VM_MIN_ADDRESS (0UL)
#define VM_MAX_ADDRESS (VM_MAXUSER_ADDRESS)
/*
@@ -116,19 +116,6 @@
#endif
/*
- * Number of 4 meg pages to use for the kernel tsb.
- */
-#ifndef KVA_PAGES
-#define KVA_PAGES (1)
-#endif
-
-/*
- * Range of kernel virtual addresses. max = min + range.
- */
-#define KVA_RANGE \
- ((KVA_PAGES * PAGE_SIZE_4M) << (PAGE_SHIFT - TTE_SHIFT))
-
-/*
* Lowest kernel virtual address, where the kernel is loaded. This is also
arbitrary. We pick a reasonably low address, which allows all of kernel
* text, data and bss to be below the 4 gigabyte mark, yet still high enough
@@ -136,12 +123,12 @@
* same as for x86 with default KVA_PAGES...
*/
#define VM_MIN_KERNEL_ADDRESS (0xc0000000)
-#define VM_MAX_KERNEL_ADDRESS (VM_MIN_KERNEL_ADDRESS + KVA_RANGE - PAGE_SIZE)
-#define KERNBASE (VM_MIN_KERNEL_ADDRESS)
-
#define VM_MIN_PROM_ADDRESS (0xf0000000)
#define VM_MAX_PROM_ADDRESS (0xffffe000)
+#define KERNBASE (VM_MIN_KERNEL_ADDRESS)
+#define VM_MAX_KERNEL_ADDRESS (vm_max_kernel_address)
+
/*
* Initial pagein size of beginning of executable file.
*/
@@ -149,4 +136,6 @@
#define VM_INITIAL_PAGEIN 16
#endif
+extern vm_offset_t vm_max_kernel_address;
+
#endif /* !_MACHINE_VMPARAM_H_ */
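
For context, the arithmetic behind the removed compile-time limit: with
16-byte ttes each mapping one 8K page, a single 4M TSB page holds 4M / 16 =
256K ttes and therefore maps 256K * 8K = 2G of KVA. The old default of
KVA_PAGES = 1 capped the kernel at 2G of virtual address space regardless of
RAM, which is what blew out kva in kmeminit() on 4G machines. Worked out with
the sparc64 constants (assumed, as above):

	KVA_RANGE = (KVA_PAGES * PAGE_SIZE_4M) << (PAGE_SHIFT - TTE_SHIFT)
	          = (1 * 4M) << (13 - 4)
	          = 2G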
diff --git a/sys/sparc64/sparc64/exception.S b/sys/sparc64/sparc64/exception.S
index 14ed0ce8cd16..d8e124547f15 100644
--- a/sys/sparc64/sparc64/exception.S
+++ b/sys/sparc64/sparc64/exception.S
@@ -67,6 +67,9 @@
#include "assym.s"
+#define TSB_KERNEL_MASK 0x0
+#define TSB_KERNEL 0x0
+
.register %g2,#ignore
.register %g3,#ignore
.register %g6,#ignore
@@ -1360,15 +1363,22 @@ END(intr_enqueue)
srlx %g6, TAR_VPN_SHIFT, %g6
/*
- * Find the index into the kernel tsb.
+ * Find the index into the kernel tsb. The tsb mask gets patched at
+ * startup.
*/
- set TSB_KERNEL_MASK, %g4
+ .globl tl1_immu_miss_load_tsb_mask
+tl1_immu_miss_load_tsb_mask:
+ sethi %hi(TSB_KERNEL_MASK), %g4
+ or %g4, %lo(TSB_KERNEL_MASK), %g4
and %g6, %g4, %g3
/*
- * Compute the tte address.
+ * Compute the tte address. The address of the kernel tsb gets
+ * patched at startup.
*/
- ldxa [%g0 + AA_IMMU_TSB] %asi, %g4
+ .globl tl1_immu_miss_load_tsb
+tl1_immu_miss_load_tsb:
+ sethi %hi(TSB_KERNEL), %g4
sllx %g3, TTE_SHIFT, %g3
add %g3, %g4, %g3
@@ -1449,16 +1459,23 @@ END(tl1_immu_miss_trap)
EMPTY
/*
- * Find the index into the kernel tsb.
+ * Find the index into the kernel tsb. The tsb mask gets patched at
+ * startup.
*/
- set TSB_KERNEL_MASK, %g4
+ .globl tl1_dmmu_miss_load_tsb_mask
+tl1_dmmu_miss_load_tsb_mask:
+ sethi %hi(TSB_KERNEL_MASK), %g4
+ or %g4, %lo(TSB_KERNEL_MASK), %g4
srlx %g6, TAR_VPN_SHIFT, %g6
and %g6, %g4, %g3
/*
- * Compute the tte address.
+ * Compute the tte address. The address of the kernel tsb gets
+ * patched at startup.
*/
- ldxa [%g0 + AA_DMMU_TSB] %asi, %g4
+ .globl tl1_dmmu_miss_load_tsb
+tl1_dmmu_miss_load_tsb:
+ sethi %hi(TSB_KERNEL), %g4
sllx %g3, TTE_SHIFT, %g3
add %g3, %g4, %g3
@@ -1606,16 +1623,23 @@ END(tl1_dmmu_miss_user)
mov %g6, %g2
/*
- * Find the index into the kernel tsb.
+ * Find the index into the kernel tsb. The tsb mask gets patched at
+ * startup.
*/
- set TSB_KERNEL_MASK, %g4
+ .globl tl1_dmmu_prot_load_tsb_mask
+tl1_dmmu_prot_load_tsb_mask:
+ sethi %hi(TSB_KERNEL_MASK), %g4
+ or %g4, %lo(TSB_KERNEL_MASK), %g4
srlx %g6, TAR_VPN_SHIFT, %g6
and %g6, %g4, %g5
/*
- * Compute the tte address.
+ * Compute the tte address. The address of the kernel tsb gets
+ * patched at startup.
*/
- ldxa [%g0 + AA_DMMU_TSB] %asi, %g4
+ .globl tl1_dmmu_prot_load_tsb
+tl1_dmmu_prot_load_tsb:
+ sethi %hi(TSB_KERNEL), %g4
sllx %g5, TTE_SHIFT, %g5
add %g4, %g5, %g3
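
All three TL1 miss handlers now load the TSB base and mask through sethi/or
sequences that pmap_bootstrap() rewrites in place. On SPARC V9, sethi places
a 22-bit immediate into bits 31..10 of its destination (zeroing the low 10
bits and the upper 32 bits), and the or supplies the low 10 bits, so the
pair can rebuild any 32-bit value; the TSB base needs only the sethi because
it is 4M-aligned, so its low 10 bits are already zero. This also means the
patched values must fit in 32 bits, which holds because the TSB sits in KVA
just past the kernel image, below the 4G mark. A hedged sketch of the
instruction words the SETHI_G4/OR_G4_I_G4 macros in the pmap.c hunk below
expand to (function names are hypothetical):

	static uint32_t
	sethi_g4(uint32_t val)
	{
		/* op=0, rd=%g4, op2=4 (sethi), imm22 = val >> 10 */
		return ((0u << 30) | (4u << 25) | (4u << 22) | (val >> 10));
	}

	static uint32_t
	or_g4_imm_g4(uint32_t val)
	{
		/* op=2, rd=%g4, op3=2 (or), rs1=%g4, i=1, simm13 = low 10 bits */
		return ((2u << 30) | (4u << 25) | (2u << 19) | (4u << 14) |
		    (1u << 13) | (val & 0x3ff));
	}

Each patched word must then be flushed, as the diff does, so the instruction
cache observes the rewritten instruction.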
diff --git a/sys/sparc64/sparc64/genassym.c b/sys/sparc64/sparc64/genassym.c
index 534c35a11ab6..57827b613d8d 100644
--- a/sys/sparc64/sparc64/genassym.c
+++ b/sys/sparc64/sparc64/genassym.c
@@ -100,7 +100,6 @@ ASSYM(TLB_DIRECT_SHIFT, TLB_DIRECT_SHIFT);
ASSYM(TSB_BUCKET_ADDRESS_BITS, TSB_BUCKET_ADDRESS_BITS);
ASSYM(TSB_BUCKET_SHIFT, TSB_BUCKET_SHIFT);
-ASSYM(TSB_KERNEL_MASK, TSB_KERNEL_MASK);
ASSYM(INT_SHIFT, INT_SHIFT);
ASSYM(PTR_SHIFT, PTR_SHIFT);
diff --git a/sys/sparc64/sparc64/pmap.c b/sys/sparc64/sparc64/pmap.c
index ca7fd0114c14..3a6eef64ab5a 100644
--- a/sys/sparc64/sparc64/pmap.c
+++ b/sys/sparc64/sparc64/pmap.c
@@ -93,6 +93,7 @@
#include <machine/cache.h>
#include <machine/frame.h>
+#include <machine/instr.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/smp.h>
@@ -146,6 +147,8 @@ vm_offset_t virtual_avail;
vm_offset_t virtual_end;
vm_offset_t kernel_vm_end;
+vm_offset_t vm_max_kernel_address;
+
/*
* Kernel pmap.
*/
@@ -160,6 +163,13 @@ static vm_offset_t pmap_bootstrap_alloc(vm_size_t size);
static vm_offset_t pmap_map_direct(vm_page_t m);
+extern int tl1_immu_miss_load_tsb[];
+extern int tl1_immu_miss_load_tsb_mask[];
+extern int tl1_dmmu_miss_load_tsb[];
+extern int tl1_dmmu_miss_load_tsb_mask[];
+extern int tl1_dmmu_prot_load_tsb[];
+extern int tl1_dmmu_prot_load_tsb_mask[];
+
/*
* If user pmap is processed with pmap_remove and the
* resident count drops to 0, there are no more pages to remove, so we
@@ -267,6 +277,7 @@ pmap_bootstrap(vm_offset_t ekva)
vm_offset_t pa;
vm_offset_t va;
vm_size_t physsz;
+ vm_size_t virtsz;
ihandle_t pmem;
ihandle_t vmem;
int sz;
@@ -274,13 +285,6 @@ pmap_bootstrap(vm_offset_t ekva)
int j;
/*
- * Set the start and end of kva. The kernel is loaded at the first
- * available 4 meg super page, so round up to the end of the page.
- */
- virtual_avail = roundup2(ekva, PAGE_SIZE_4M);
- virtual_end = VM_MAX_KERNEL_ADDRESS;
-
- /*
* Find out what physical memory is available from the prom and
* initialize the phys_avail array. This must be done before
* pmap_bootstrap_alloc is called.
@@ -309,17 +313,64 @@ pmap_bootstrap(vm_offset_t ekva)
}
physmem = btoc(physsz);
+ virtsz = roundup(physsz, PAGE_SIZE_4M << (PAGE_SHIFT - TTE_SHIFT));
+ vm_max_kernel_address = VM_MIN_KERNEL_ADDRESS + virtsz;
+ tsb_kernel_size = virtsz >> (PAGE_SHIFT - TTE_SHIFT);
+ tsb_kernel_mask = (tsb_kernel_size >> TTE_SHIFT) - 1;
+
/*
- * Allocate the kernel tsb and lock it in the tlb.
+ * Set the start and end of kva. The kernel is loaded at the first
+ * available 4 meg super page, so round up to the end of the page.
*/
- pa = pmap_bootstrap_alloc(KVA_PAGES * PAGE_SIZE_4M);
+ virtual_avail = roundup2(ekva, PAGE_SIZE_4M);
+ virtual_end = vm_max_kernel_address;
+
+ /*
+ * Allocate the kernel tsb.
+ */
+ pa = pmap_bootstrap_alloc(tsb_kernel_size);
if (pa & PAGE_MASK_4M)
panic("pmap_bootstrap: tsb unaligned\n");
tsb_kernel_phys = pa;
tsb_kernel = (struct tte *)virtual_avail;
- virtual_avail += KVA_PAGES * PAGE_SIZE_4M;
+ virtual_avail += tsb_kernel_size;
+
+ /*
+ * Patch the virtual address and the tsb mask into the trap table.
+ */
+#define SETHI_G4(x) \
+ EIF_OP(IOP_FORM2) | EIF_F2_RD(4) | EIF_F2_OP2(INS0_SETHI) | \
+ EIF_IMM((x) >> 10, 22)
+#define OR_G4_I_G4(x) \
+ EIF_OP(IOP_MISC) | EIF_F3_RD(4) | EIF_F3_OP3(INS2_OR) | \
+ EIF_F3_RS1(4) | EIF_F3_I(1) | EIF_IMM(x, 10)
+
+ tl1_immu_miss_load_tsb[0] = SETHI_G4((vm_offset_t)tsb_kernel);
+ tl1_immu_miss_load_tsb_mask[0] = SETHI_G4(tsb_kernel_mask);
+ tl1_immu_miss_load_tsb_mask[1] = OR_G4_I_G4(tsb_kernel_mask);
+ flush(tl1_immu_miss_load_tsb);
+ flush(tl1_immu_miss_load_tsb_mask);
+ flush(tl1_immu_miss_load_tsb_mask + 1);
+
+ tl1_dmmu_miss_load_tsb[0] = SETHI_G4((vm_offset_t)tsb_kernel);
+ tl1_dmmu_miss_load_tsb_mask[0] = SETHI_G4(tsb_kernel_mask);
+ tl1_dmmu_miss_load_tsb_mask[1] = OR_G4_I_G4(tsb_kernel_mask);
+ flush(tl1_dmmu_miss_load_tsb);
+ flush(tl1_dmmu_miss_load_tsb_mask);
+ flush(tl1_dmmu_miss_load_tsb_mask + 1);
+
+ tl1_dmmu_prot_load_tsb[0] = SETHI_G4((vm_offset_t)tsb_kernel);
+ tl1_dmmu_prot_load_tsb_mask[0] = SETHI_G4(tsb_kernel_mask);
+ tl1_dmmu_prot_load_tsb_mask[1] = OR_G4_I_G4(tsb_kernel_mask);
+ flush(tl1_dmmu_prot_load_tsb);
+ flush(tl1_dmmu_prot_load_tsb_mask);
+ flush(tl1_dmmu_prot_load_tsb_mask + 1);
+
+ /*
+ * Lock it in the tlb.
+ */
pmap_map_tsb();
- bzero(tsb_kernel, KVA_PAGES * PAGE_SIZE_4M);
+ bzero(tsb_kernel, tsb_kernel_size);
/*
* Enter fake 8k pages for the 4MB kernel pages, so that
@@ -431,9 +482,9 @@ pmap_map_tsb(void)
/*
* Map the 4mb tsb pages.
*/
- for (i = 0; i < KVA_PAGES; i++) {
- va = (vm_offset_t)tsb_kernel + i * PAGE_SIZE_4M;
- pa = tsb_kernel_phys + i * PAGE_SIZE_4M;
+ for (i = 0; i < tsb_kernel_size; i += PAGE_SIZE_4M) {
+ va = (vm_offset_t)tsb_kernel + i;
+ pa = tsb_kernel_phys + i;
/* XXX - cheetah */
data = TD_V | TD_4M | TD_PA(pa) | TD_L | TD_CP | TD_CV |
TD_P | TD_W;
@@ -443,14 +494,6 @@ pmap_map_tsb(void)
}
/*
- * Load the tsb registers.
- */
- stxa(AA_DMMU_TSB, ASI_DMMU, (vm_offset_t)tsb_kernel);
- stxa(AA_IMMU_TSB, ASI_IMMU, (vm_offset_t)tsb_kernel);
- membar(Sync);
- flush(tsb_kernel);
-
- /*
* Set the secondary context to be the kernel context (needed for
* fp block operations in the kernel and the cache code).
*/
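
The three patch sites in the pmap.c hunk above receive an identical
sethi/or rewrite; the repetition could be folded into a helper along these
lines (patch_tsb_site() is hypothetical, and assumes the SETHI_G4 and
OR_G4_I_G4 macros are hoisted out of pmap_bootstrap()):

	static void
	patch_tsb_site(int *load_tsb, int *load_tsb_mask)
	{
		/* Patch the TSB base into the lone sethi... */
		load_tsb[0] = SETHI_G4((vm_offset_t)tsb_kernel);
		/* ...and the mask into the sethi/or pair. */
		load_tsb_mask[0] = SETHI_G4(tsb_kernel_mask);
		load_tsb_mask[1] = OR_G4_I_G4(tsb_kernel_mask);
		flush(load_tsb);
		flush(load_tsb_mask);
		flush(load_tsb_mask + 1);
	}

	patch_tsb_site(tl1_immu_miss_load_tsb, tl1_immu_miss_load_tsb_mask);
	patch_tsb_site(tl1_dmmu_miss_load_tsb, tl1_dmmu_miss_load_tsb_mask);
	patch_tsb_site(tl1_dmmu_prot_load_tsb, tl1_dmmu_prot_load_tsb_mask);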
diff --git a/sys/sparc64/sparc64/tsb.c b/sys/sparc64/sparc64/tsb.c
index ade7610faecc..5083073ad58f 100644
--- a/sys/sparc64/sparc64/tsb.c
+++ b/sys/sparc64/sparc64/tsb.c
@@ -92,6 +92,8 @@ SYSCTL_LONG(_debug_pmap_stats, OID_AUTO, tsb_nforeach, CTLFLAG_RD,
#endif
struct tte *tsb_kernel;
+vm_size_t tsb_kernel_mask;
+vm_size_t tsb_kernel_size;
vm_offset_t tsb_kernel_phys;
struct tte *