path: root/sys/arm64
Diffstat (limited to 'sys/arm64')
-rw-r--r--  sys/arm64/arm64/cpu_feat.c       |  51
-rw-r--r--  sys/arm64/arm64/efirt_machdep.c  |  14
-rw-r--r--  sys/arm64/arm64/exception.S      |   9
-rw-r--r--  sys/arm64/arm64/identcpu.c       |  55
-rw-r--r--  sys/arm64/arm64/locore.S         |  45
-rw-r--r--  sys/arm64/arm64/machdep.c        |  33
-rw-r--r--  sys/arm64/arm64/pmap.c           | 140
-rw-r--r--  sys/arm64/arm64/ptrauth.c        |  53
-rw-r--r--  sys/arm64/conf/std.arm64         |   1
-rw-r--r--  sys/arm64/conf/std.dev           |   1
-rw-r--r--  sys/arm64/include/armreg.h       |  34
-rw-r--r--  sys/arm64/include/cpu.h          |  30
-rw-r--r--  sys/arm64/include/cpu_feat.h     |  52
-rw-r--r--  sys/arm64/include/hypervisor.h   |  79
-rw-r--r--  sys/arm64/include/pmap.h         |   3
-rw-r--r--  sys/arm64/include/proc.h         |   1
-rw-r--r--  sys/arm64/include/vmm.h          |   1
-rw-r--r--  sys/arm64/rockchip/rk_gpio.c     | 197
-rw-r--r--  sys/arm64/rockchip/rk_tsadc.c    |   2
-rw-r--r--  sys/arm64/vmm/arm64.h            |   3
-rw-r--r--  sys/arm64/vmm/hyp.h              |   1
-rw-r--r--  sys/arm64/vmm/io/vtimer.c        |  94
-rw-r--r--  sys/arm64/vmm/io/vtimer.h        |   2
-rw-r--r--  sys/arm64/vmm/vmm.c              |   6
-rw-r--r--  sys/arm64/vmm/vmm_arm64.c        |  15
-rw-r--r--  sys/arm64/vmm/vmm_hyp.c          |  84
26 files changed, 757 insertions(+), 249 deletions(-)
diff --git a/sys/arm64/arm64/cpu_feat.c b/sys/arm64/arm64/cpu_feat.c
index cc262394913d..94114d47f846 100644
--- a/sys/arm64/arm64/cpu_feat.c
+++ b/sys/arm64/arm64/cpu_feat.c
@@ -32,16 +32,21 @@
#include <machine/cpu.h>
#include <machine/cpu_feat.h>
+SYSCTL_NODE(_hw, OID_AUTO, feat, CTLFLAG_RD, 0, "CPU features/errata");
+
/* TODO: Make this a list if we ever grow a callback other than smccc_errata */
static cpu_feat_errata_check_fn cpu_feat_check_cb = NULL;
void
enable_cpu_feat(uint32_t stage)
{
+ char tunable[32];
struct cpu_feat **featp, *feat;
uint32_t midr;
u_int errata_count, *errata_list;
cpu_feat_errata errata_status;
+ cpu_feat_en check_status;
+ bool val;
MPASS((stage & ~CPU_FEAT_STAGE_MASK) == 0);
@@ -49,6 +54,21 @@ enable_cpu_feat(uint32_t stage)
SET_FOREACH(featp, cpu_feat_set) {
feat = *featp;
+ /* Read any tunable the user may have set */
+ if (stage == CPU_FEAT_EARLY_BOOT && PCPU_GET(cpuid) == 0) {
+ snprintf(tunable, sizeof(tunable), "hw.feat.%s",
+ feat->feat_name);
+ if (TUNABLE_BOOL_FETCH(tunable, &val)) {
+ if (val) {
+ feat->feat_flags |=
+ CPU_FEAT_USER_ENABLED;
+ } else {
+ feat->feat_flags |=
+ CPU_FEAT_USER_DISABLED;
+ }
+ }
+ }
+
/* Run the enablement code at the correct stage of boot */
if ((feat->feat_flags & CPU_FEAT_STAGE_MASK) != stage)
continue;
@@ -58,8 +78,26 @@ enable_cpu_feat(uint32_t stage)
PCPU_GET(cpuid) != 0)
continue;
- if (feat->feat_check != NULL && !feat->feat_check(feat, midr))
- continue;
+ if (feat->feat_check != NULL) {
+ check_status = feat->feat_check(feat, midr);
+ } else {
+ check_status = FEAT_DEFAULT_ENABLE;
+ }
+ /* Ignore features that are not present */
+ if (check_status == FEAT_ALWAYS_DISABLE)
+ goto next;
+
+ /* The user disabled the feature */
+ if ((feat->feat_flags & CPU_FEAT_USER_DISABLED) != 0)
+ goto next;
+
+ /*
+ * If the feature was disabled by default and the user
+ * didn't enable it, skip it.
+ */
+ if (check_status == FEAT_DEFAULT_DISABLE &&
+ (feat->feat_flags & CPU_FEAT_USER_ENABLED) == 0)
+ goto next;
/*
* Check if the feature has any errata that may need a
@@ -97,8 +135,13 @@ enable_cpu_feat(uint32_t stage)
/* Shouldn't be possible */
MPASS(errata_status != ERRATA_UNKNOWN);
- feat->feat_enable(feat, errata_status, errata_list,
- errata_count);
+ if (feat->feat_enable(feat, errata_status, errata_list,
+ errata_count))
+ feat->feat_enabled = true;
+
+next:
+ if (!feat->feat_enabled && feat->feat_disabled != NULL)
+ feat->feat_disabled(feat);
}
}
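
The cpu_feat interface reworked above gives feat_check a three-way result, makes feat_enable report success, and wires each feature to a hw.feat.<name> boot tunable plus a read-only sysctl. A minimal sketch of a feature written against the new interface; example_feat and its callbacks are hypothetical, not part of this commit:

static cpu_feat_en
example_check(const struct cpu_feat *feat __unused, u_int midr __unused)
{
	/*
	 * Returning FEAT_DEFAULT_DISABLE instead would make the feature
	 * opt-in via the hw.feat.example_feat tunable that
	 * enable_cpu_feat() reads on the boot CPU.
	 */
	return (FEAT_DEFAULT_ENABLE);
}

static bool
example_enable(const struct cpu_feat *feat __unused,
    cpu_feat_errata errata_status __unused, u_int *errata_list __unused,
    u_int errata_count __unused)
{
	/* true sets feat_enabled, exported as the hw.feat.example_feat sysctl. */
	return (true);
}

CPU_FEAT(example_feat, "Example feature",
    example_check, NULL, example_enable, NULL,
    CPU_FEAT_EARLY_BOOT | CPU_FEAT_PER_CPU);
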
diff --git a/sys/arm64/arm64/efirt_machdep.c b/sys/arm64/arm64/efirt_machdep.c
index 0301eb91c9ef..bde0d4f784dc 100644
--- a/sys/arm64/arm64/efirt_machdep.c
+++ b/sys/arm64/arm64/efirt_machdep.c
@@ -106,7 +106,8 @@ efi_1t1_l3(vm_offset_t va)
if (*l0 == 0) {
m = efi_1t1_page();
mphys = VM_PAGE_TO_PHYS(m);
- *l0 = PHYS_TO_PTE(mphys) | L0_TABLE;
+ *l0 = PHYS_TO_PTE(mphys) | TATTR_UXN_TABLE |
+ TATTR_AP_TABLE_NO_EL0 | L0_TABLE;
} else {
mphys = PTE_TO_PHYS(*l0);
}
@@ -117,7 +118,8 @@ efi_1t1_l3(vm_offset_t va)
if (*l1 == 0) {
m = efi_1t1_page();
mphys = VM_PAGE_TO_PHYS(m);
- *l1 = PHYS_TO_PTE(mphys) | L1_TABLE;
+ *l1 = PHYS_TO_PTE(mphys) | TATTR_UXN_TABLE |
+ TATTR_AP_TABLE_NO_EL0 | L1_TABLE;
} else {
mphys = PTE_TO_PHYS(*l1);
}
@@ -128,7 +130,8 @@ efi_1t1_l3(vm_offset_t va)
if (*l2 == 0) {
m = efi_1t1_page();
mphys = VM_PAGE_TO_PHYS(m);
- *l2 = PHYS_TO_PTE(mphys) | L2_TABLE;
+ *l2 = PHYS_TO_PTE(mphys) | TATTR_UXN_TABLE |
+ TATTR_AP_TABLE_NO_EL0 | L2_TABLE;
} else {
mphys = PTE_TO_PHYS(*l2);
}
@@ -218,8 +221,9 @@ efi_create_1t1_map(struct efi_md *map, int ndesc, int descsz)
p->md_phys, mode, p->md_pages);
}
- l3_attr = ATTR_AF | pmap_sh_attr | ATTR_S1_IDX(mode) |
- ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_S1_nG | L3_PAGE;
+ l3_attr = ATTR_S1_UXN | ATTR_AF | pmap_sh_attr |
+ ATTR_S1_IDX(mode) | ATTR_S1_AP(ATTR_S1_AP_RW) |
+ ATTR_S1_nG | L3_PAGE;
if (mode == VM_MEMATTR_DEVICE || p->md_attr & EFI_MD_ATTR_XP)
l3_attr |= ATTR_S1_XN;
diff --git a/sys/arm64/arm64/exception.S b/sys/arm64/arm64/exception.S
index 13095def8b00..5a4181348a54 100644
--- a/sys/arm64/arm64/exception.S
+++ b/sys/arm64/arm64/exception.S
@@ -42,10 +42,9 @@
*/
.macro save_registers_head el
.if \el == 1
- mov x18, sp
- stp x0, x1, [sp, #(TF_X - TF_SIZE - 128)]!
+ stp x0, x1, [sp, #-(TF_SIZE - TF_X + 128)]!
.else
- stp x0, x1, [sp, #(TF_X - TF_SIZE)]!
+ stp x0, x1, [sp, #-(TF_SIZE - TF_X)]!
.endif
stp x2, x3, [sp, #(2 * 8)]
stp x4, x5, [sp, #(4 * 8)]
@@ -61,7 +60,9 @@
stp x24, x25, [sp, #(24 * 8)]
stp x26, x27, [sp, #(26 * 8)]
stp x28, x29, [sp, #(28 * 8)]
-.if \el == 0
+.if \el == 1
+ add x18, sp, #(TF_SIZE - TF_X + 128)
+.else
mrs x18, sp_el0
.endif
mrs x10, elr_el1
diff --git a/sys/arm64/arm64/identcpu.c b/sys/arm64/arm64/identcpu.c
index bcacea43ad2f..2d07420bcdb0 100644
--- a/sys/arm64/arm64/identcpu.c
+++ b/sys/arm64/arm64/identcpu.c
@@ -232,6 +232,10 @@ static const struct cpu_parts cpu_parts_arm[] = {
{ CPU_PART_CORTEX_X2, "Cortex-X2" },
{ CPU_PART_CORTEX_X3, "Cortex-X3" },
{ CPU_PART_CORTEX_X4, "Cortex-X4" },
+ { CPU_PART_C1_NANO, "C1-Nano" },
+ { CPU_PART_C1_PRO, "C1-Pro" },
+ { CPU_PART_C1_PREMIUM, "C1-Premium" },
+ { CPU_PART_C1_ULTRA, "C1-Ultra" },
{ CPU_PART_NEOVERSE_E1, "Neoverse-E1" },
{ CPU_PART_NEOVERSE_N1, "Neoverse-N1" },
{ CPU_PART_NEOVERSE_N2, "Neoverse-N2" },
@@ -2272,37 +2276,25 @@ static const struct mrs_user_reg user_regs[] = {
static bool
user_ctr_has_neoverse_n1_1542419(uint32_t midr, uint64_t ctr)
{
- /* Skip non-Neoverse-N1 */
- if (!CPU_MATCH(CPU_IMPL_MASK | CPU_PART_MASK, CPU_IMPL_ARM,
- CPU_PART_NEOVERSE_N1, 0, 0))
- return (false);
-
- switch (CPU_VAR(midr)) {
- default:
- break;
- case 4:
- /* Fixed in r4p1 */
- if (CPU_REV(midr) > 0)
- break;
- /* FALLTHROUGH */
- case 3:
- /* If DIC is enabled (coherent icache) then we are affected */
- return (CTR_DIC_VAL(ctr) != 0);
- }
-
- return (false);
+ /*
+ * Neoverse-N1 erratum 1542419
+ * Present in r3p0 - r4p0
+ * Fixed in r4p1
+ */
+ return (midr_check_var_part_range(midr, CPU_IMPL_ARM,
+ CPU_PART_NEOVERSE_N1, 3, 0, 4, 0) && CTR_DIC_VAL(ctr) != 0);
}
-static bool
-user_ctr_check(const struct cpu_feat *feat __unused, u_int midr __unused)
+static cpu_feat_en
+user_ctr_check(const struct cpu_feat *feat __unused, u_int midr)
{
if (emulate_ctr)
- return (true);
+ return (FEAT_DEFAULT_ENABLE);
if (user_ctr_has_neoverse_n1_1542419(midr, READ_SPECIALREG(ctr_el0)))
- return (true);
+ return (FEAT_DEFAULT_ENABLE);
- return (false);
+ return (FEAT_ALWAYS_DISABLE);
}
static bool
@@ -2320,7 +2312,7 @@ user_ctr_has_errata(const struct cpu_feat *feat __unused, u_int midr,
return (false);
}
-static void
+static bool
user_ctr_enable(const struct cpu_feat *feat __unused,
cpu_feat_errata errata_status, u_int *errata_list, u_int errata_count)
{
@@ -2356,16 +2348,13 @@ user_ctr_enable(const struct cpu_feat *feat __unused,
WRITE_SPECIALREG(sctlr_el1,
READ_SPECIALREG(sctlr_el1) & ~SCTLR_UCT);
isb();
+
+ return (true);
}
-static struct cpu_feat user_ctr = {
- .feat_name = "Trap CTR_EL0",
- .feat_check = user_ctr_check,
- .feat_has_errata = user_ctr_has_errata,
- .feat_enable = user_ctr_enable,
- .feat_flags = CPU_FEAT_AFTER_DEV | CPU_FEAT_PER_CPU,
-};
-DATA_SET(cpu_feat_set, user_ctr);
+CPU_FEAT(trap_ctr, "Trap CTR_EL0",
+ user_ctr_check, user_ctr_has_errata, user_ctr_enable, NULL,
+ CPU_FEAT_AFTER_DEV | CPU_FEAT_PER_CPU);
static bool
user_ctr_handler(uint64_t esr, struct trapframe *frame)
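
With user_ctr_has_neoverse_n1_1542419() now expressed as a variant/revision range, the matching rule is easiest to see with a concrete MIDR. A sketch (not part of the commit) using the helper this change adds to cpu.h:

/* Neoverse-N1 r4p1, i.e. variant 4, revision 1. */
u_int midr = CPU_ID_RAW(CPU_IMPL_ARM, CPU_PART_NEOVERSE_N1, 4, 1);

/*
 * Range r3p0 - r4p0: r4p1 fails the final check because the variant
 * equals var_high (4) while the revision (1) exceeds rev_high (0),
 * so the fixed part is correctly excluded.
 */
bool affected = midr_check_var_part_range(midr, CPU_IMPL_ARM,
    CPU_PART_NEOVERSE_N1, 3, 0, 4, 0);	/* false */
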
diff --git a/sys/arm64/arm64/locore.S b/sys/arm64/arm64/locore.S
index 4a10a2b4f2d3..d35e334905a7 100644
--- a/sys/arm64/arm64/locore.S
+++ b/sys/arm64/arm64/locore.S
@@ -39,6 +39,23 @@
#define VIRT_BITS 48
+/*
+ * Loads a 64-bit value into reg using 1 to 4 mov/movk instructions.
+ * This can be used early on when we don't know the CPUs endianness.
+ */
+.macro mov_q reg, val
+ mov \reg, :abs_g0_nc:\val
+.if (\val >> 16) & 0xffff != 0
+ movk \reg, :abs_g1_nc:\val
+.endif
+.if (\val >> 32) & 0xffff != 0
+ movk \reg, :abs_g2_nc:\val
+.endif
+.if (\val >> 48) & 0xffff != 0
+ movk \reg, :abs_g3:\val
+.endif
+.endm
+
#if PAGE_SIZE == PAGE_SIZE_16K
/*
* The number of level 3 tables to create. 32 will allow for 1G of address
@@ -324,15 +341,23 @@ LENTRY(enter_kernel_el)
cmp x23, #(CURRENTEL_EL_EL2)
b.eq 1f
- ldr x2, =SCTLR_MMU_OFF
+ /*
+ * Ensure there are no memory operations here. If the boot loader
+ * enters the kernel in big-endian mode then loading sctlr will
+ * be incorrect. As instructions are the same in both endians it is
+ * safe to use mov instructions.
+ */
+ mov_q x2, SCTLR_MMU_OFF
msr sctlr_el1, x2
- /* SCTLR_EOS is set so eret is a context synchronizing event so we
+ /*
+ * SCTLR_EOS is set to make eret a context synchronizing event. We
* need an isb here to ensure it's observed by later instructions,
* but don't need it in the eret below.
*/
isb
- /* Ensure SPSR_EL1 and pstate are in sync. The only wat to set the
+ /*
+ * Ensure SPSR_EL1 and pstate are in sync. The only way to set the
* latter is to set the former and return from an exception with eret.
*/
mov x2, #(PSR_DAIF | PSR_M_EL1h)
@@ -346,11 +371,19 @@ LENTRY(enter_kernel_el)
* Set just the reserved bits in sctlr_el2. This will disable the
* MMU which may have broken the kernel if we enter the kernel in
* EL2, e.g. when using VHE.
+ *
+ * As with sctlr_el1 above use mov instructions to ensure there are
+ * no memory operations.
*/
- ldr x2, =(SCTLR_EL2_RES1 | SCTLR_EL2_EIS | SCTLR_EL2_EOS)
+ mov_q x2, (SCTLR_EL2_RES1 | SCTLR_EL2_EIS | SCTLR_EL2_EOS)
msr sctlr_el2, x2
isb
+ /*
+ * The hardware is now in little-endian mode so memory operations
+ * are safe.
+ */
+
/* Configure the Hypervisor */
ldr x2, =(HCR_RW | HCR_APK | HCR_API)
msr hcr_el2, x2
@@ -385,7 +418,7 @@ LENTRY(enter_kernel_el)
msr SCTLR_EL12_REG, x2
mov x2, xzr /* CPTR_EL2 is managed by vfp.c */
- ldr x3, =(CNTHCTL_E2H_EL1PCTEN | CNTHCTL_E2H_EL1PTEN)
+ ldr x3, =(CNTHCTL_E2H_EL1PCTEN_NOTRAP | CNTHCTL_E2H_EL1PTEN_NOTRAP)
ldr x5, =(PSR_DAIF | PSR_M_EL2h)
b .Ldone_vhe
@@ -396,7 +429,7 @@ LENTRY(enter_kernel_el)
msr vbar_el2, x2
ldr x2, =(CPTR_RES1)
- ldr x3, =(CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN)
+ ldr x3, =(CNTHCTL_EL1PCTEN_NOTRAP | CNTHCTL_EL1PCEN_NOTRAP)
ldr x5, =(PSR_DAIF | PSR_M_EL1h)
.Ldone_vhe:
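
The mov_q macro above assembles a 64-bit constant 16 bits at a time precisely so that no load from memory is needed while the endianness is still unknown. The chunk-selection logic, restated in C as a sketch (mov_q_equivalent is illustrative, not kernel code):

uint64_t
mov_q_equivalent(uint64_t val)
{
	uint64_t reg;

	reg = val & 0xffff;			/* mov  reg, :abs_g0_nc:val */
	if (((val >> 16) & 0xffff) != 0)
		reg |= val & (0xffffULL << 16);	/* movk reg, :abs_g1_nc:val */
	if (((val >> 32) & 0xffff) != 0)
		reg |= val & (0xffffULL << 32);	/* movk reg, :abs_g2_nc:val */
	if (((val >> 48) & 0xffff) != 0)
		reg |= val & (0xffffULL << 48);	/* movk reg, :abs_g3:val */
	return (reg);
}
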
diff --git a/sys/arm64/arm64/machdep.c b/sys/arm64/arm64/machdep.c
index 53856dd90cae..322bad273a08 100644
--- a/sys/arm64/arm64/machdep.c
+++ b/sys/arm64/arm64/machdep.c
@@ -173,16 +173,20 @@ SYSINIT(ssp_warn, SI_SUB_COPYRIGHT, SI_ORDER_ANY, print_ssp_warning, NULL);
SYSINIT(ssp_warn2, SI_SUB_LAST, SI_ORDER_ANY, print_ssp_warning, NULL);
#endif
-static bool
+static cpu_feat_en
pan_check(const struct cpu_feat *feat __unused, u_int midr __unused)
{
uint64_t id_aa64mfr1;
- id_aa64mfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
- return (ID_AA64MMFR1_PAN_VAL(id_aa64mfr1) != ID_AA64MMFR1_PAN_NONE);
+ if (!get_kernel_reg(ID_AA64MMFR1_EL1, &id_aa64mfr1))
+ return (FEAT_ALWAYS_DISABLE);
+ if (ID_AA64MMFR1_PAN_VAL(id_aa64mfr1) == ID_AA64MMFR1_PAN_NONE)
+ return (FEAT_ALWAYS_DISABLE);
+
+ return (FEAT_DEFAULT_ENABLE);
}
-static void
+static bool
pan_enable(const struct cpu_feat *feat __unused,
cpu_feat_errata errata_status __unused, u_int *errata_list __unused,
u_int errata_count __unused)
@@ -200,15 +204,20 @@ pan_enable(const struct cpu_feat *feat __unused,
".arch_extension pan \n"
"msr pan, #1 \n"
".arch_extension nopan \n");
+
+ return (true);
}
-static struct cpu_feat feat_pan = {
- .feat_name = "FEAT_PAN",
- .feat_check = pan_check,
- .feat_enable = pan_enable,
- .feat_flags = CPU_FEAT_EARLY_BOOT | CPU_FEAT_PER_CPU,
-};
-DATA_SET(cpu_feat_set, feat_pan);
+static void
+pan_disabled(const struct cpu_feat *feat __unused)
+{
+ if (PCPU_GET(cpuid) == 0)
+ update_special_reg(ID_AA64MMFR1_EL1, ID_AA64MMFR1_PAN_MASK, 0);
+}
+
+CPU_FEAT(feat_pan, "Privileged access never",
+ pan_check, NULL, pan_enable, pan_disabled,
+ CPU_FEAT_AFTER_DEV | CPU_FEAT_PER_CPU);
bool
has_hyp(void)
@@ -857,7 +866,7 @@ initarm(struct arm64_bootparams *abp)
cninit();
set_ttbr0(abp->kern_ttbr0);
- cpu_tlb_flushID();
+ pmap_s1_invalidate_all_kernel();
if (!valid)
panic("Invalid bus configuration: %s",
diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
index ec89c4573799..dbf5c820d20b 100644
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -190,6 +190,8 @@ pt_entry_t __read_mostly pmap_gp_attr;
#define PMAP_SAN_PTE_BITS (ATTR_AF | ATTR_S1_XN | pmap_sh_attr | \
ATTR_KERN_GP | ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | ATTR_S1_AP(ATTR_S1_AP_RW))
+static bool __read_mostly pmap_multiple_tlbi = false;
+
struct pmap_large_md_page {
struct rwlock pv_lock;
struct md_page pv_page;
@@ -1297,7 +1299,7 @@ pmap_bootstrap_dmap(vm_size_t kernlen)
}
}
- cpu_tlb_flushID();
+ pmap_s1_invalidate_all_kernel();
bs_state.dmap_valid = true;
@@ -1399,7 +1401,7 @@ pmap_bootstrap(void)
/* And the l3 tables for the early devmap */
pmap_bootstrap_l3(VM_MAX_KERNEL_ADDRESS - (PMAP_MAPDEV_EARLY_SIZE));
- cpu_tlb_flushID();
+ pmap_s1_invalidate_all_kernel();
#define alloc_pages(var, np) \
(var) = bs_state.freemempos; \
@@ -1656,14 +1658,17 @@ pmap_init_pv_table(void)
}
}
-static bool
+static cpu_feat_en
pmap_dbm_check(const struct cpu_feat *feat __unused, u_int midr __unused)
{
uint64_t id_aa64mmfr1;
id_aa64mmfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
- return (ID_AA64MMFR1_HAFDBS_VAL(id_aa64mmfr1) >=
- ID_AA64MMFR1_HAFDBS_AF_DBS);
+ if (ID_AA64MMFR1_HAFDBS_VAL(id_aa64mmfr1) >=
+ ID_AA64MMFR1_HAFDBS_AF_DBS)
+ return (FEAT_DEFAULT_ENABLE);
+
+ return (FEAT_ALWAYS_DISABLE);
}
static bool
@@ -1671,8 +1676,8 @@ pmap_dbm_has_errata(const struct cpu_feat *feat __unused, u_int midr,
u_int **errata_list, u_int *errata_count)
{
/* Disable on Cortex-A55 for erratum 1024718 - all revisions */
- if (CPU_MATCH(CPU_IMPL_MASK | CPU_PART_MASK, CPU_IMPL_ARM,
- CPU_PART_CORTEX_A55, 0, 0)) {
+ if (CPU_IMPL(midr) == CPU_IMPL_ARM &&
+ CPU_PART(midr) == CPU_PART_CORTEX_A55) {
static u_int errata_id = 1024718;
*errata_list = &errata_id;
@@ -1681,21 +1686,19 @@ pmap_dbm_has_errata(const struct cpu_feat *feat __unused, u_int midr,
}
/* Disable on Cortex-A510 for erratum 2051678 - r0p0 to r0p2 */
- if (CPU_MATCH(CPU_IMPL_MASK | CPU_PART_MASK | CPU_VAR_MASK,
- CPU_IMPL_ARM, CPU_PART_CORTEX_A510, 0, 0)) {
- if (CPU_REV(PCPU_GET(midr)) < 3) {
- static u_int errata_id = 2051678;
+ if (midr_check_var_part_range(midr, CPU_IMPL_ARM, CPU_PART_CORTEX_A510,
+ 0, 0, 0, 2)) {
+ static u_int errata_id = 2051678;
- *errata_list = &errata_id;
- *errata_count = 1;
- return (true);
- }
+ *errata_list = &errata_id;
+ *errata_count = 1;
+ return (true);
}
return (false);
}
-static void
+static bool
pmap_dbm_enable(const struct cpu_feat *feat __unused,
cpu_feat_errata errata_status, u_int *errata_list __unused,
u_int errata_count)
@@ -1704,7 +1707,7 @@ pmap_dbm_enable(const struct cpu_feat *feat __unused,
/* Skip if there is an erratum affecting DBM */
if (errata_status != ERRATA_NONE)
- return;
+ return (false);
tcr = READ_SPECIALREG(tcr_el1) | TCR_HD;
WRITE_SPECIALREG(tcr_el1, tcr);
@@ -1714,16 +1717,58 @@ pmap_dbm_enable(const struct cpu_feat *feat __unused,
__asm __volatile("tlbi vmalle1");
dsb(nsh);
isb();
+
+ return (true);
}
-static struct cpu_feat feat_dbm = {
- .feat_name = "FEAT_HAFDBS (DBM)",
- .feat_check = pmap_dbm_check,
- .feat_has_errata = pmap_dbm_has_errata,
- .feat_enable = pmap_dbm_enable,
- .feat_flags = CPU_FEAT_AFTER_DEV | CPU_FEAT_PER_CPU,
-};
-DATA_SET(cpu_feat_set, feat_dbm);
+CPU_FEAT(feat_hafdbs, "Hardware management of the Access flag and dirty state",
+ pmap_dbm_check, pmap_dbm_has_errata, pmap_dbm_enable, NULL,
+ CPU_FEAT_AFTER_DEV | CPU_FEAT_PER_CPU);
+
+static cpu_feat_en
+pmap_multiple_tlbi_check(const struct cpu_feat *feat __unused, u_int midr)
+{
+ /*
+ * Cortex-A55 erratum 2441007 (Cat B rare)
+ * Present in all revisions
+ */
+ if (CPU_IMPL(midr) == CPU_IMPL_ARM &&
+ CPU_PART(midr) == CPU_PART_CORTEX_A55)
+ return (FEAT_DEFAULT_DISABLE);
+
+ /*
+ * Cortex-A76 erratum 1286807 (Cat B rare)
+ * Present in r0p0 - r3p0
+ * Fixed in r3p1
+ */
+ if (midr_check_var_part_range(midr, CPU_IMPL_ARM, CPU_PART_CORTEX_A76,
+ 0, 0, 3, 0))
+ return (FEAT_DEFAULT_DISABLE);
+
+ /*
+ * Cortex-A510 erratum 2441009 (Cat B rare)
+ * Present in r0p0 - r1p1
+ * Fixed in r1p2
+ */
+ if (midr_check_var_part_range(midr, CPU_IMPL_ARM, CPU_PART_CORTEX_A510,
+ 0, 0, 1, 1))
+ return (FEAT_DEFAULT_DISABLE);
+
+ return (FEAT_ALWAYS_DISABLE);
+}
+
+static bool
+pmap_multiple_tlbi_enable(const struct cpu_feat *feat __unused,
+ cpu_feat_errata errata_status, u_int *errata_list __unused,
+ u_int errata_count __unused)
+{
+ pmap_multiple_tlbi = true;
+ return (true);
+}
+
+CPU_FEAT(errata_multi_tlbi, "Multiple TLBI errata",
+ pmap_multiple_tlbi_check, NULL, pmap_multiple_tlbi_enable, NULL,
+ CPU_FEAT_EARLY_BOOT | CPU_FEAT_PER_CPU);
/*
* Initialize the pmap module.
@@ -1878,9 +1923,17 @@ pmap_s1_invalidate_page(pmap_t pmap, vm_offset_t va, bool final_only)
r = TLBI_VA(va);
if (pmap == kernel_pmap) {
pmap_s1_invalidate_kernel(r, final_only);
+ if (pmap_multiple_tlbi) {
+ dsb(ish);
+ pmap_s1_invalidate_kernel(r, final_only);
+ }
} else {
r |= ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie));
pmap_s1_invalidate_user(r, final_only);
+ if (pmap_multiple_tlbi) {
+ dsb(ish);
+ pmap_s1_invalidate_user(r, final_only);
+ }
}
dsb(ish);
isb();
@@ -1922,12 +1975,24 @@ pmap_s1_invalidate_strided(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
end = TLBI_VA(eva);
for (r = start; r < end; r += TLBI_VA(stride))
pmap_s1_invalidate_kernel(r, final_only);
+
+ if (pmap_multiple_tlbi) {
+ dsb(ish);
+ for (r = start; r < end; r += TLBI_VA(stride))
+ pmap_s1_invalidate_kernel(r, final_only);
+ }
} else {
start = end = ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie));
start |= TLBI_VA(sva);
end |= TLBI_VA(eva);
for (r = start; r < end; r += TLBI_VA(stride))
pmap_s1_invalidate_user(r, final_only);
+
+ if (pmap_multiple_tlbi) {
+ dsb(ish);
+ for (r = start; r < end; r += TLBI_VA(stride))
+ pmap_s1_invalidate_user(r, final_only);
+ }
}
dsb(ish);
isb();
@@ -1963,6 +2028,19 @@ pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
pmap_s2_invalidate_range(pmap, sva, eva, final_only);
}
+void
+pmap_s1_invalidate_all_kernel(void)
+{
+ dsb(ishst);
+ __asm __volatile("tlbi vmalle1is");
+ dsb(ish);
+ if (pmap_multiple_tlbi) {
+ __asm __volatile("tlbi vmalle1is");
+ dsb(ish);
+ }
+ isb();
+}
+
/*
* Invalidates all cached intermediate- and final-level TLB entries for the
* given virtual address space.
@@ -1977,9 +2055,17 @@ pmap_s1_invalidate_all(pmap_t pmap)
dsb(ishst);
if (pmap == kernel_pmap) {
__asm __volatile("tlbi vmalle1is");
+ if (pmap_multiple_tlbi) {
+ dsb(ish);
+ __asm __volatile("tlbi vmalle1is");
+ }
} else {
r = ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie));
__asm __volatile("tlbi aside1is, %0" : : "r" (r));
+ if (pmap_multiple_tlbi) {
+ dsb(ish);
+ __asm __volatile("tlbi aside1is, %0" : : "r" (r));
+ }
}
dsb(ish);
isb();
@@ -7967,7 +8053,7 @@ pmap_mapbios(vm_paddr_t pa, vm_size_t size)
pa += L2_SIZE;
}
if ((old_l2e & ATTR_DESCR_VALID) != 0)
- pmap_s1_invalidate_all(kernel_pmap);
+ pmap_s1_invalidate_all_kernel();
else {
/*
* Because the old entries were invalid and the new
@@ -8058,7 +8144,7 @@ pmap_unmapbios(void *p, vm_size_t size)
}
}
if (preinit_map) {
- pmap_s1_invalidate_all(kernel_pmap);
+ pmap_s1_invalidate_all_kernel();
return;
}
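
The pmap changes guard against the "multiple TLBI" errata (Cortex-A55 2441007, Cortex-A76 1286807, Cortex-A510 2441009, all Category B rare) by repeating each invalidation once the first has completed. Every path above follows the same shape, condensed here as a sketch (invalidate_page_workaround is illustrative, not a function added by the commit):

static __inline void
invalidate_page_workaround(uint64_t r, bool final_only)
{
	pmap_s1_invalidate_kernel(r, final_only);
	if (pmap_multiple_tlbi) {
		/* Wait for the first TLBI to finish, then repeat it. */
		dsb(ish);
		pmap_s1_invalidate_kernel(r, final_only);
	}
	dsb(ish);
	isb();
}
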
diff --git a/sys/arm64/arm64/ptrauth.c b/sys/arm64/arm64/ptrauth.c
index dbe0c69b8d60..ab40b72887e9 100644
--- a/sys/arm64/arm64/ptrauth.c
+++ b/sys/arm64/arm64/ptrauth.c
@@ -82,7 +82,7 @@ ptrauth_disable(void)
return (false);
}
-static bool
+static cpu_feat_en
ptrauth_check(const struct cpu_feat *feat __unused, u_int midr __unused)
{
uint64_t isar;
@@ -97,11 +97,11 @@ ptrauth_check(const struct cpu_feat *feat __unused, u_int midr __unused)
if (!pac_enable) {
if (boothowto & RB_VERBOSE)
printf("Pointer authentication is disabled\n");
- goto out;
+ return (FEAT_ALWAYS_DISABLE);
}
if (ptrauth_disable())
- goto out;
+ return (FEAT_ALWAYS_DISABLE);
/*
* This assumes if there is pointer authentication on the boot CPU
@@ -116,32 +116,21 @@ ptrauth_check(const struct cpu_feat *feat __unused, u_int midr __unused)
if (get_kernel_reg(ID_AA64ISAR1_EL1, &isar)) {
if (ID_AA64ISAR1_APA_VAL(isar) > 0 ||
ID_AA64ISAR1_API_VAL(isar) > 0) {
- return (true);
+ return (FEAT_DEFAULT_ENABLE);
}
}
/* The QARMA3 algorithm is reported in ID_AA64ISAR2_EL1. */
if (get_kernel_reg(ID_AA64ISAR2_EL1, &isar)) {
if (ID_AA64ISAR2_APA3_VAL(isar) > 0) {
- return (true);
+ return (FEAT_DEFAULT_ENABLE);
}
}
-out:
- /*
- * Pointer authentication may be disabled, mask out the ID fields we
- * expose to userspace and the rest of the kernel so they don't try
- * to use it.
- */
- update_special_reg(ID_AA64ISAR1_EL1, ID_AA64ISAR1_API_MASK |
- ID_AA64ISAR1_APA_MASK | ID_AA64ISAR1_GPA_MASK |
- ID_AA64ISAR1_GPI_MASK, 0);
- update_special_reg(ID_AA64ISAR2_EL1, ID_AA64ISAR2_APA3_MASK, 0);
-
- return (false);
+ return (FEAT_ALWAYS_DISABLE);
}
-static void
+static bool
ptrauth_enable(const struct cpu_feat *feat __unused,
cpu_feat_errata errata_status __unused, u_int *errata_list __unused,
u_int errata_count __unused)
@@ -153,16 +142,30 @@ ptrauth_enable(const struct cpu_feat *feat __unused,
elf64_addr_mask_14.code |= PAC_ADDR_MASK_14;
elf64_addr_mask_14.data |= PAC_ADDR_MASK_14;
#endif
+
+ return (true);
}
+static void
+ptrauth_disabled(const struct cpu_feat *feat __unused)
+{
+ /*
+ * Pointer authentication may be disabled, mask out the ID fields we
+ * expose to userspace and the rest of the kernel so they don't try
+ * to use it.
+ */
+ if (PCPU_GET(cpuid) == 0) {
+ update_special_reg(ID_AA64ISAR1_EL1, ID_AA64ISAR1_API_MASK |
+ ID_AA64ISAR1_APA_MASK | ID_AA64ISAR1_GPA_MASK |
+ ID_AA64ISAR1_GPI_MASK, 0);
+ update_special_reg(ID_AA64ISAR2_EL1, ID_AA64ISAR2_APA3_MASK, 0);
+ }
+
+}
-static struct cpu_feat feat_pauth = {
- .feat_name = "FEAT_PAuth",
- .feat_check = ptrauth_check,
- .feat_enable = ptrauth_enable,
- .feat_flags = CPU_FEAT_EARLY_BOOT | CPU_FEAT_SYSTEM,
-};
-DATA_SET(cpu_feat_set, feat_pauth);
+CPU_FEAT(feat_pauth, "Pointer Authentication",
+ ptrauth_check, NULL, ptrauth_enable, ptrauth_disabled,
+ CPU_FEAT_EARLY_BOOT | CPU_FEAT_SYSTEM);
/* Copy the keys when forking a new process */
void
diff --git a/sys/arm64/conf/std.arm64 b/sys/arm64/conf/std.arm64
index c83e98c17a33..a0568466cfaf 100644
--- a/sys/arm64/conf/std.arm64
+++ b/sys/arm64/conf/std.arm64
@@ -7,6 +7,7 @@ makeoptions WITH_CTF=1 # Run ctfconvert(1) for DTrace support
options SCHED_ULE # ULE scheduler
options NUMA # Non-Uniform Memory Architecture support
options PREEMPTION # Enable kernel thread preemption
+options EXTERR_STRINGS
options VIMAGE # Subsystem virtualization, e.g. VNET
options INET # InterNETworking
options INET6 # IPv6 communications protocols
diff --git a/sys/arm64/conf/std.dev b/sys/arm64/conf/std.dev
index 719f272426dd..c5c364ffda04 100644
--- a/sys/arm64/conf/std.dev
+++ b/sys/arm64/conf/std.dev
@@ -115,7 +115,6 @@ device mmcsd # mmc/sd flash cards
options HID_DEBUG # enable debug msgs
device hid # Generic HID support
device hidbus # Generic HID Bus
-options U2F_MAKE_UHID_ALIAS # install /dev/uhid alias for /dev/u2f/
# Firmware
device mmio_sram # Generic on-chip SRAM
diff --git a/sys/arm64/include/armreg.h b/sys/arm64/include/armreg.h
index 500f35c48787..da051e8f7c8a 100644
--- a/sys/arm64/include/armreg.h
+++ b/sys/arm64/include/armreg.h
@@ -232,6 +232,14 @@
#define CNTP_CTL_IMASK (1 << 1)
#define CNTP_CTL_ISTATUS (1 << 2)
+/* CNTP_CTL_EL02 - Counter-timer Physical Timer Control register */
+#define CNTP_CTL_EL02_REG MRS_REG_ALT_NAME(CNTP_CTL_EL02)
+#define CNTP_CTL_EL02_op0 3
+#define CNTP_CTL_EL02_op1 5
+#define CNTP_CTL_EL02_CRn 14
+#define CNTP_CTL_EL02_CRm 2
+#define CNTP_CTL_EL02_op2 1
+
/* CNTP_CVAL_EL0 - Counter-timer Physical Timer CompareValue register */
#define CNTP_CVAL_EL0_op0 3
#define CNTP_CVAL_EL0_op1 3
@@ -239,6 +247,14 @@
#define CNTP_CVAL_EL0_CRm 2
#define CNTP_CVAL_EL0_op2 2
+/* CNTP_CVAL_EL02 - Counter-timer Physical Timer CompareValue register */
+#define CNTP_CVAL_EL02_REG MRS_REG_ALT_NAME(CNTP_CVAL_EL02)
+#define CNTP_CVAL_EL02_op0 3
+#define CNTP_CVAL_EL02_op1 5
+#define CNTP_CVAL_EL02_CRn 14
+#define CNTP_CVAL_EL02_CRm 2
+#define CNTP_CVAL_EL02_op2 2
+
/* CNTP_TVAL_EL0 - Counter-timer Physical Timer TimerValue register */
#define CNTP_TVAL_EL0_op0 3
#define CNTP_TVAL_EL0_op1 3
@@ -254,6 +270,14 @@
#define CNTPCT_EL0_CRm 0
#define CNTPCT_EL0_op2 1
+/* CNTPCTSS_EL0 - Counter-timer Self-Synchronized Physical Count register */
+#define CNTPCTSS_EL0_REG MRS_REG_ALT_NAME(CNTPCTSS_EL0)
+#define CNTPCTSS_EL0_op0 3
+#define CNTPCTSS_EL0_op1 3
+#define CNTPCTSS_EL0_CRn 14
+#define CNTPCTSS_EL0_CRm 0
+#define CNTPCTSS_EL0_op2 5
+
/* CNTV_CTL_EL0 - Counter-timer Virtual Timer Control register */
#define CNTV_CTL_EL0_op0 3
#define CNTV_CTL_EL0_op1 3
@@ -282,6 +306,14 @@
#define CNTV_CVAL_EL02_CRm 3
#define CNTV_CVAL_EL02_op2 2
+/* CNTVCTSS_EL0 - Counter-timer Self-Synchronized Virtual Count register */
+#define CNTVCTSS_EL0_REG MRS_REG_ALT_NAME(CNTVCTSS_EL0)
+#define CNTVCTSS_EL0_op0 3
+#define CNTVCTSS_EL0_op1 3
+#define CNTVCTSS_EL0_CRn 14
+#define CNTVCTSS_EL0_CRm 0
+#define CNTVCTSS_EL0_op2 6
+
/* CONTEXTIDR_EL1 - Context ID register */
#define CONTEXTIDR_EL1_REG MRS_REG_ALT_NAME(CONTEXTIDR_EL1)
#define CONTEXTIDR_EL1_op0 3
@@ -2612,10 +2644,12 @@
(SCTLR_LSMAOE | SCTLR_nTLSMD | SCTLR_EIS | SCTLR_TSCXT | SCTLR_EOS)
#define SCTLR_MMU_ON \
(SCTLR_MMU_OFF | \
+ SCTLR_EPAN | \
SCTLR_BT1 | \
SCTLR_BT0 | \
SCTLR_UCI | \
SCTLR_SPAN | \
+ SCTLR_IESB | \
SCTLR_nTWE | \
SCTLR_nTWI | \
SCTLR_UCT | \
diff --git a/sys/arm64/include/cpu.h b/sys/arm64/include/cpu.h
index 59cda36f275e..124da8c215ed 100644
--- a/sys/arm64/include/cpu.h
+++ b/sys/arm64/include/cpu.h
@@ -125,7 +125,11 @@
#define CPU_PART_NEOVERSE_V3 0xD84
#define CPU_PART_CORTEX_X925 0xD85
#define CPU_PART_CORTEX_A725 0xD87
+#define CPU_PART_C1_NANO 0xD8A
+#define CPU_PART_C1_PRO 0xD8B
+#define CPU_PART_C1_ULTRA 0xD8C
#define CPU_PART_NEOVERSE_N3 0xD8E
+#define CPU_PART_C1_PREMIUM 0xD90
/* Cavium Part numbers */
#define CPU_PART_THUNDERX 0x0A1
@@ -193,8 +197,30 @@
(((mask) & PCPU_GET(midr)) == \
((mask) & CPU_ID_RAW((impl), (part), (var), (rev))))
-#define CPU_MATCH_RAW(mask, devid) \
- (((mask) & PCPU_GET(midr)) == ((mask) & (devid)))
+#if !defined(__ASSEMBLER__)
+static inline bool
+midr_check_var_part_range(u_int midr, u_int impl, u_int part, u_int var_low,
+ u_int rev_low, u_int var_high, u_int rev_high)
+{
+ /* Check for the correct part */
+ if (CPU_IMPL(midr) != impl || CPU_PART(midr) != part)
+ return (false);
+
+ /* Check if the variant is between var_low and var_high inclusive */
+ if (CPU_VAR(midr) < var_low || CPU_VAR(midr) > var_high)
+ return (false);
+
+ /* If the variant is the low value, check if the revision is high enough */
+ if (CPU_VAR(midr) == var_low && CPU_REV(midr) < rev_low)
+ return (false);
+
+ /* If the variant is the high value, check if the revision is low enough */
+ if (CPU_VAR(midr) == var_high && CPU_REV(midr) > rev_high)
+ return (false);
+
+ return (true);
+}
+#endif
/*
* Chip-specific errata. This defines are intended to be
diff --git a/sys/arm64/include/cpu_feat.h b/sys/arm64/include/cpu_feat.h
index 9fe6a9dd95d9..6a311d4000bb 100644
--- a/sys/arm64/include/cpu_feat.h
+++ b/sys/arm64/include/cpu_feat.h
@@ -29,6 +29,7 @@
#define _MACHINE_CPU_FEAT_H_
#include <sys/linker_set.h>
+#include <sys/sysctl.h>
typedef enum {
ERRATA_UNKNOWN, /* Unknown erratum */
@@ -39,6 +40,31 @@ typedef enum {
/* kernel component. */
} cpu_feat_errata;
+typedef enum {
+ /*
+ * Don't implement the feature or erratum workaround,
+ * e.g. the feature is not implemented or the erratum
+ * is for another CPU.
+ */
+ FEAT_ALWAYS_DISABLE,
+
+ /*
+ * Disable by default, but allow the user to enable,
+ * e.g. For a rare erratum with a workaround, Arm
+ * Category B (rare) or similar.
+ */
+ FEAT_DEFAULT_DISABLE,
+
+ /*
+ * Enabled by default, but allow the user to disable,
+ * e.g. for a common erratum with a workaround, Arm
+ * Category A or B or similar.
+ */
+ FEAT_DEFAULT_ENABLE,
+
+ /* We could add FEAT_ALWAYS_ENABLE if a need was found. */
+} cpu_feat_en;
+
#define CPU_FEAT_STAGE_MASK 0x00000001
#define CPU_FEAT_EARLY_BOOT 0x00000000
#define CPU_FEAT_AFTER_DEV 0x00000001
@@ -47,23 +73,45 @@ typedef enum {
#define CPU_FEAT_PER_CPU 0x00000000
#define CPU_FEAT_SYSTEM 0x00000010
+#define CPU_FEAT_USER_ENABLED 0x40000000
+#define CPU_FEAT_USER_DISABLED 0x80000000
+
struct cpu_feat;
-typedef bool (cpu_feat_check)(const struct cpu_feat *, u_int);
+typedef cpu_feat_en (cpu_feat_check)(const struct cpu_feat *, u_int);
typedef bool (cpu_feat_has_errata)(const struct cpu_feat *, u_int,
u_int **, u_int *);
-typedef void (cpu_feat_enable)(const struct cpu_feat *, cpu_feat_errata,
+typedef bool (cpu_feat_enable)(const struct cpu_feat *, cpu_feat_errata,
u_int *, u_int);
+typedef void (cpu_feat_disabled)(const struct cpu_feat *);
struct cpu_feat {
const char *feat_name;
cpu_feat_check *feat_check;
cpu_feat_has_errata *feat_has_errata;
cpu_feat_enable *feat_enable;
+ cpu_feat_disabled *feat_disabled;
uint32_t feat_flags;
+ bool feat_enabled;
};
SET_DECLARE(cpu_feat_set, struct cpu_feat);
+SYSCTL_DECL(_hw_feat);
+
+#define CPU_FEAT(name, descr, check, has_errata, enable, disabled, flags) \
+static struct cpu_feat name = { \
+ .feat_name = #name, \
+ .feat_check = check, \
+ .feat_has_errata = has_errata, \
+ .feat_enable = enable, \
+ .feat_disabled = disabled, \
+ .feat_flags = flags, \
+ .feat_enabled = false, \
+}; \
+DATA_SET(cpu_feat_set, name); \
+SYSCTL_BOOL(_hw_feat, OID_AUTO, name, CTLFLAG_RD, &name.feat_enabled, \
+ 0, descr)
+
/*
* Allow drivers to mark an erratum as worked around, e.g. the Errata
* Management ABI may know the workaround isn't needed on a given system.
diff --git a/sys/arm64/include/hypervisor.h b/sys/arm64/include/hypervisor.h
index e3a880afbe9c..04e15b55b218 100644
--- a/sys/arm64/include/hypervisor.h
+++ b/sys/arm64/include/hypervisor.h
@@ -36,20 +36,77 @@
*/
/* CNTHCTL_EL2 - Counter-timer Hypervisor Control register */
-#define CNTHCTL_EVNTI_MASK (0xf << 4) /* Bit to trigger event stream */
/* Valid if HCR_EL2.E2H == 0 */
-#define CNTHCTL_EL1PCTEN (1 << 0) /* Allow physical counter access */
-#define CNTHCTL_EL1PCEN (1 << 1) /* Allow physical timer access */
+#define CNTHCTL_EL1PCTEN_SHIFT 0
+#define CNTHCTL_EL1PCTEN_MASK (0x1ul << CNTHCTL_EL1PCTEN_SHIFT)
+#define CNTHCTL_EL1PCTEN_TRAP (0x0ul << CNTHCTL_EL1PCTEN_SHIFT)
+#define CNTHCTL_EL1PCTEN_NOTRAP (0x1ul << CNTHCTL_EL1PCTEN_SHIFT)
+#define CNTHCTL_EL1PCEN_SHIFT 1
+#define CNTHCTL_EL1PCEN_MASK (0x1ul << CNTHCTL_EL1PCEN_SHIFT)
+#define CNTHCTL_EL1PCEN_TRAP (0x0ul << CNTHCTL_EL1PCEN_SHIFT)
+#define CNTHCTL_EL1PCEN_NOTRAP (0x1ul << CNTHCTL_EL1PCEN_SHIFT)
/* Valid if HCR_EL2.E2H == 1 */
-#define CNTHCTL_E2H_EL0PCTEN (1 << 0) /* Allow EL0 physical counter access */
-#define CNTHCTL_E2H_EL0VCTEN (1 << 1) /* Allow EL0 virtual counter access */
-#define CNTHCTL_E2H_EL0VTEN (1 << 8)
-#define CNTHCTL_E2H_EL0PTEN (1 << 9)
-#define CNTHCTL_E2H_EL1PCTEN (1 << 10) /* Allow physical counter access */
-#define CNTHCTL_E2H_EL1PTEN (1 << 11) /* Allow physical timer access */
+#define CNTHCTL_E2H_EL0PCTEN_SHIFT 0
+#define CNTHCTL_E2H_EL0PCTEN_MASK (0x1ul << CNTHCTL_E2H_EL0PCTEN_SHIFT)
+#define CNTHCTL_E2H_EL0PCTEN_TRAP (0x0ul << CNTHCTL_E2H_EL0PCTEN_SHIFT)
+#define CNTHCTL_E2H_EL0PCTEN_NOTRAP (0x1ul << CNTHCTL_E2H_EL0PCTEN_SHIFT)
+#define CNTHCTL_E2H_EL0VCTEN_SHIFT 1
+#define CNTHCTL_E2H_EL0VCTEN_MASK (0x1ul << CNTHCTL_E2H_EL0VCTEN_SHIFT)
+#define CNTHCTL_E2H_EL0VCTEN_TRAP (0x0ul << CNTHCTL_E2H_EL0VCTEN_SHIFT)
+#define CNTHCTL_E2H_EL0VCTEN_NOTRAP (0x1ul << CNTHCTL_E2H_EL0VCTEN_SHIFT)
+#define CNTHCTL_E2H_EL0VTEN_SHIFT 8
+#define CNTHCTL_E2H_EL0VTEN_MASK (0x1ul << CNTHCTL_E2H_EL0VTEN_SHIFT)
+#define CNTHCTL_E2H_EL0VTEN_TRAP (0x0ul << CNTHCTL_E2H_EL0VTEN_SHIFT)
+#define CNTHCTL_E2H_EL0VTEN_NOTRAP (0x1ul << CNTHCTL_E2H_EL0VTEN_SHIFT)
+#define CNTHCTL_E2H_EL0PTEN_SHIFT 9
+#define CNTHCTL_E2H_EL0PTEN_MASK (0x1ul << CNTHCTL_E2H_EL0PTEN_SHIFT)
+#define CNTHCTL_E2H_EL0PTEN_TRAP (0x0ul << CNTHCTL_E2H_EL0PTEN_SHIFT)
+#define CNTHCTL_E2H_EL0PTEN_NOTRAP (0x1ul << CNTHCTL_E2H_EL0PTEN_SHIFT)
+#define CNTHCTL_E2H_EL1PCTEN_SHIFT 10
+#define CNTHCTL_E2H_EL1PCTEN_MASK (0x1ul << CNTHCTL_E2H_EL1PCTEN_SHIFT)
+#define CNTHCTL_E2H_EL1PCTEN_TRAP (0x0ul << CNTHCTL_E2H_EL1PCTEN_SHIFT)
+#define CNTHCTL_E2H_EL1PCTEN_NOTRAP (0x1ul << CNTHCTL_E2H_EL1PCTEN_SHIFT)
+#define CNTHCTL_E2H_EL1PTEN_SHIFT 11
+#define CNTHCTL_E2H_EL1PTEN_MASK (0x1ul << CNTHCTL_E2H_EL1PTEN_SHIFT)
+#define CNTHCTL_E2H_EL1PTEN_TRAP (0x0ul << CNTHCTL_E2H_EL1PTEN_SHIFT)
+#define CNTHCTL_E2H_EL1PTEN_NOTRAP (0x1ul << CNTHCTL_E2H_EL1PTEN_SHIFT)
/* Unconditionally valid */
-#define CNTHCTL_EVNTDIR (1 << 3) /* Control transition trigger bit */
-#define CNTHCTL_EVNTEN (1 << 2) /* Enable event stream */
+#define CNTHCTL_EVNTEN_SHIFT 2
+#define CNTHCTL_EVNTEN_MASK (0x1ul << CNTHCTL_EVNTEN_SHIFT)
+#define CNTHCTL_EVNTEN_DIS (0x0ul << CNTHCTL_EVNTEN_SHIFT)
+#define CNTHCTL_EVNTEN_EN (0x1ul << CNTHCTL_EVNTEN_SHIFT)
+#define CNTHCTL_EVNTDIR_SHIFT 3
+#define CNTHCTL_EVNTDIR_MASK (0x1ul << CNTHCTL_EVNTDIR_SHIFT)
+#define CNTHCTL_EVNTDIR_HIGH (0x0ul << CNTHCTL_EVNTDIR_SHIFT)
+#define CNTHCTL_EVNTDIR_LOW (0x1ul << CNTHCTL_EVNTDIR_SHIFT)
+#define CNTHCTL_EVNTI_SHIFT 4
+#define CNTHCTL_EVNTI_MASK (0xful << CNTHCTL_EVNTI_SHIFT)
+#define CNTHCTL_ECV_SHIFT 12
+#define CNTHCTL_ECV_MASK (0x1ul << CNTHCTL_ECV_SHIFT)
+#define CNTHCTL_ECV_DIS (0x0ul << CNTHCTL_ECV_SHIFT)
+#define CNTHCTL_ECV_EN (0x1ul << CNTHCTL_ECV_SHIFT)
+#define CNTHCTL_EL1TVT_SHIFT 13
+#define CNTHCTL_EL1TVT_MASK (0x1ul << CNTHCTL_EL1TVT_SHIFT)
+#define CNTHCTL_EL1TVT_NOTRAP (0x0ul << CNTHCTL_EL1TVT_SHIFT)
+#define CNTHCTL_EL1TVT_TRAP (0x1ul << CNTHCTL_EL1TVT_SHIFT)
+#define CNTHCTL_EL1TVCT_SHIFT 14
+#define CNTHCTL_EL1TVCT_MASK (0x1ul << CNTHCTL_EL1TVCT_SHIFT)
+#define CNTHCTL_EL1TVCT_NOTRAP (0x0ul << CNTHCTL_EL1TVCT_SHIFT)
+#define CNTHCTL_EL1TVCT_TRAP (0x1ul << CNTHCTL_EL1TVCT_SHIFT)
+#define CNTHCTL_EL1NVPCT_SHIFT 15
+#define CNTHCTL_EL1NVPCT_MASK (0x1ul << CNTHCTL_EL1NVPCT_SHIFT)
+#define CNTHCTL_EL1NVPCT_NOTRAP (0x0ul << CNTHCTL_EL1NVPCT_SHIFT)
+#define CNTHCTL_EL1NVPCT_TRAP (0x1ul << CNTHCTL_EL1NVPCT_SHIFT)
+#define CNTHCTL_EL1NVVCT_SHIFT 16
+#define CNTHCTL_EL1NVVCT_MASK (0x1ul << CNTHCTL_EL1NVVCT_SHIFT)
+#define CNTHCTL_EL1NVVCT_NOTRAP (0x0ul << CNTHCTL_EL1NVVCT_SHIFT)
+#define CNTHCTL_EL1NVVCT_TRAP (0x1ul << CNTHCTL_EL1NVVCT_SHIFT)
+#define CNTHCTL_EVNTIS_SHIFT 17
+#define CNTHCTL_EVNTIS_MASK (0x1ul << CNTHCTL_EVNTIS_SHIFT)
+#define CNTHCTL_CNTVMASK_SHIFT 18
+#define CNTHCTL_CNTVMASK_MASK (0x1ul << CNTHCTL_CNTVMASK_SHIFT)
+#define CNTHCTL_CNTPMASK_SHIFT 19
+#define CNTHCTL_CNTPMASK_MASK (0x1ul << CNTHCTL_CNTPMASK_SHIFT)
/* CNTPOFF_EL2 - Counter-timer Physical Offset Register */
#define CNTPOFF_EL2_REG MRS_REG_ALT_NAME(CNTPOFF_EL2)
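
Each CNTHCTL_EL2 field now carries explicit per-polarity macros (_TRAP/_NOTRAP, _EN/_DIS) instead of a bare bit, so callers spell out intent even when the encoding is zero. A sketch of composing a non-VHE control value that traps guest physical counter and timer accesses, mirroring what vtimer_vminit() does below:

uint64_t cnthctl;

/* Both _TRAP encodings are 0, but naming them documents the choice. */
cnthctl = CNTHCTL_EL1PCTEN_TRAP | CNTHCTL_EL1PCEN_TRAP;
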
diff --git a/sys/arm64/include/pmap.h b/sys/arm64/include/pmap.h
index 0f23f200f0f6..406b6e2c5e0a 100644
--- a/sys/arm64/include/pmap.h
+++ b/sys/arm64/include/pmap.h
@@ -69,6 +69,7 @@ struct md_page {
TAILQ_HEAD(,pv_entry) pv_list;
int pv_gen;
vm_memattr_t pv_memattr;
+ uint8_t pv_reserve[3];
};
enum pmap_stage {
@@ -174,6 +175,8 @@ int pmap_fault(pmap_t, uint64_t, uint64_t);
struct pcb *pmap_switch(struct thread *);
+void pmap_s1_invalidate_all_kernel(void);
+
extern void (*pmap_clean_stage2_tlbi)(void);
extern void (*pmap_stage2_invalidate_range)(uint64_t, vm_offset_t, vm_offset_t,
bool);
diff --git a/sys/arm64/include/proc.h b/sys/arm64/include/proc.h
index 184743d4cc80..b40990e89385 100644
--- a/sys/arm64/include/proc.h
+++ b/sys/arm64/include/proc.h
@@ -75,6 +75,7 @@ struct mdthread {
struct mdproc {
uint64_t md_tcr; /* TCR_EL1 fields to update */
+ uint64_t md_reserved[2];
};
#endif /* !LOCORE */
diff --git a/sys/arm64/include/vmm.h b/sys/arm64/include/vmm.h
index 73b5b4a09591..e839b5dd92c9 100644
--- a/sys/arm64/include/vmm.h
+++ b/sys/arm64/include/vmm.h
@@ -42,6 +42,7 @@ enum vm_suspend_how {
VM_SUSPEND_RESET,
VM_SUSPEND_POWEROFF,
VM_SUSPEND_HALT,
+ VM_SUSPEND_DESTROY,
VM_SUSPEND_LAST
};
diff --git a/sys/arm64/rockchip/rk_gpio.c b/sys/arm64/rockchip/rk_gpio.c
index 61614f532634..145d9769f35f 100644
--- a/sys/arm64/rockchip/rk_gpio.c
+++ b/sys/arm64/rockchip/rk_gpio.c
@@ -90,6 +90,11 @@ struct rk_pin_irqsrc {
uint32_t mode;
};
+struct rk_gpio_reg {
+ uint8_t single;
+ uint8_t offset;
+};
+
struct rk_gpio_softc {
device_t sc_dev;
device_t sc_busdev;
@@ -103,7 +108,7 @@ struct rk_gpio_softc {
uint32_t swporta_ddr;
uint32_t version;
struct pin_cached pin_cached[RK_GPIO_MAX_PINS];
- uint8_t regs[RK_GPIO_REGNUM];
+ struct rk_gpio_reg regs[RK_GPIO_REGNUM];
void *ihandle;
struct rk_pin_irqsrc isrcs[RK_GPIO_MAX_PINS];
};
@@ -138,14 +143,15 @@ static int rk_gpio_detach(device_t dev);
static int
rk_gpio_read_bit(struct rk_gpio_softc *sc, int reg, int bit)
{
- int offset = sc->regs[reg];
+ struct rk_gpio_reg *rk_reg = &sc->regs[reg];
uint32_t value;
- if (sc->version == RK_GPIO_TYPE_V1) {
- value = RK_GPIO_READ(sc, offset);
+ if (rk_reg->single) {
+ value = RK_GPIO_READ(sc, rk_reg->offset);
value >>= bit;
} else {
- value = RK_GPIO_READ(sc, bit > 15 ? offset + 4 : offset);
+ value = RK_GPIO_READ(sc, bit > 15 ?
+ rk_reg->offset + 4 : rk_reg->offset);
value >>= (bit % 16);
}
return (value & 1);
@@ -154,50 +160,53 @@ rk_gpio_read_bit(struct rk_gpio_softc *sc, int reg, int bit)
static void
rk_gpio_write_bit(struct rk_gpio_softc *sc, int reg, int bit, int data)
{
- int offset = sc->regs[reg];
+ struct rk_gpio_reg *rk_reg = &sc->regs[reg];
uint32_t value;
- if (sc->version == RK_GPIO_TYPE_V1) {
- value = RK_GPIO_READ(sc, offset);
+ if (rk_reg->single) {
+ value = RK_GPIO_READ(sc, rk_reg->offset);
if (data)
value |= (1 << bit);
else
value &= ~(1 << bit);
- RK_GPIO_WRITE(sc, offset, value);
+ RK_GPIO_WRITE(sc, rk_reg->offset, value);
} else {
if (data)
value = (1 << (bit % 16));
else
value = 0;
value |= (1 << ((bit % 16) + 16));
- RK_GPIO_WRITE(sc, bit > 15 ? offset + 4 : offset, value);
+ RK_GPIO_WRITE(sc, bit > 15 ?
+ rk_reg->offset + 4 : rk_reg->offset, value);
}
}
static uint32_t
rk_gpio_read_4(struct rk_gpio_softc *sc, int reg)
{
- int offset = sc->regs[reg];
+ struct rk_gpio_reg *rk_reg = &sc->regs[reg];
uint32_t value;
- if (sc->version == RK_GPIO_TYPE_V1)
- value = RK_GPIO_READ(sc, offset);
+ if (rk_reg->single)
+ value = RK_GPIO_READ(sc, rk_reg->offset);
else
- value = (RK_GPIO_READ(sc, offset) & 0xffff) |
- (RK_GPIO_READ(sc, offset + 4) << 16);
+ value = (RK_GPIO_READ(sc, rk_reg->offset) & 0xffff) |
+ (RK_GPIO_READ(sc, rk_reg->offset + 4) << 16);
return (value);
}
static void
rk_gpio_write_4(struct rk_gpio_softc *sc, int reg, uint32_t value)
{
- int offset = sc->regs[reg];
+ struct rk_gpio_reg *rk_reg = &sc->regs[reg];
- if (sc->version == RK_GPIO_TYPE_V1)
- RK_GPIO_WRITE(sc, offset, value);
+ if (rk_reg->single)
+ RK_GPIO_WRITE(sc, rk_reg->offset, value);
else {
- RK_GPIO_WRITE(sc, offset, (value & 0xffff) | 0xffff0000);
- RK_GPIO_WRITE(sc, offset + 4, (value >> 16) | 0xffff0000);
+ RK_GPIO_WRITE(sc, rk_reg->offset,
+ (value & 0xffff) | 0xffff0000);
+ RK_GPIO_WRITE(sc, rk_reg->offset + 4,
+ (value >> 16) | 0xffff0000);
}
}
@@ -313,31 +322,31 @@ rk_gpio_attach(device_t dev)
switch (sc->version) {
case RK_GPIO_TYPE_V1:
- sc->regs[RK_GPIO_SWPORTA_DR] = 0x00;
- sc->regs[RK_GPIO_SWPORTA_DDR] = 0x04;
- sc->regs[RK_GPIO_INTEN] = 0x30;
- sc->regs[RK_GPIO_INTMASK] = 0x34;
- sc->regs[RK_GPIO_INTTYPE_LEVEL] = 0x38;
- sc->regs[RK_GPIO_INT_POLARITY] = 0x3c;
- sc->regs[RK_GPIO_INT_STATUS] = 0x40;
- sc->regs[RK_GPIO_INT_RAWSTATUS] = 0x44;
- sc->regs[RK_GPIO_DEBOUNCE] = 0x48;
- sc->regs[RK_GPIO_PORTA_EOI] = 0x4c;
- sc->regs[RK_GPIO_EXT_PORTA] = 0x50;
+ sc->regs[RK_GPIO_SWPORTA_DR] = (struct rk_gpio_reg){ 1, 0x00 };
+ sc->regs[RK_GPIO_SWPORTA_DDR] = (struct rk_gpio_reg){ 1, 0x04 };
+ sc->regs[RK_GPIO_INTEN] = (struct rk_gpio_reg){ 1, 0x30 };
+ sc->regs[RK_GPIO_INTMASK] = (struct rk_gpio_reg){ 1, 0x34 };
+ sc->regs[RK_GPIO_INTTYPE_LEVEL] = (struct rk_gpio_reg){ 1, 0x38 };
+ sc->regs[RK_GPIO_INT_POLARITY] = (struct rk_gpio_reg){ 1, 0x3c };
+ sc->regs[RK_GPIO_INT_STATUS] = (struct rk_gpio_reg){ 1, 0x40 };
+ sc->regs[RK_GPIO_INT_RAWSTATUS] = (struct rk_gpio_reg){ 1, 0x44 };
+ sc->regs[RK_GPIO_DEBOUNCE] = (struct rk_gpio_reg){ 1, 0x48 };
+ sc->regs[RK_GPIO_PORTA_EOI] = (struct rk_gpio_reg){ 1, 0x4c };
+ sc->regs[RK_GPIO_EXT_PORTA] = (struct rk_gpio_reg){ 1, 0x50 };
break;
case RK_GPIO_TYPE_V2:
- sc->regs[RK_GPIO_SWPORTA_DR] = 0x00;
- sc->regs[RK_GPIO_SWPORTA_DDR] = 0x08;
- sc->regs[RK_GPIO_INTEN] = 0x10;
- sc->regs[RK_GPIO_INTMASK] = 0x18;
- sc->regs[RK_GPIO_INTTYPE_LEVEL] = 0x20;
- sc->regs[RK_GPIO_INTTYPE_BOTH] = 0x30;
- sc->regs[RK_GPIO_INT_POLARITY] = 0x28;
- sc->regs[RK_GPIO_INT_STATUS] = 0x50;
- sc->regs[RK_GPIO_INT_RAWSTATUS] = 0x58;
- sc->regs[RK_GPIO_DEBOUNCE] = 0x38;
- sc->regs[RK_GPIO_PORTA_EOI] = 0x60;
- sc->regs[RK_GPIO_EXT_PORTA] = 0x70;
+ sc->regs[RK_GPIO_SWPORTA_DR] = (struct rk_gpio_reg){ 0, 0x00 };
+ sc->regs[RK_GPIO_SWPORTA_DDR] = (struct rk_gpio_reg){ 0, 0x08 };
+ sc->regs[RK_GPIO_INTEN] = (struct rk_gpio_reg){ 0, 0x10 };
+ sc->regs[RK_GPIO_INTMASK] = (struct rk_gpio_reg){ 0, 0x18 };
+ sc->regs[RK_GPIO_INTTYPE_LEVEL] = (struct rk_gpio_reg){ 0, 0x20 };
+ sc->regs[RK_GPIO_INTTYPE_BOTH] = (struct rk_gpio_reg){ 0, 0x30 };
+ sc->regs[RK_GPIO_INT_POLARITY] = (struct rk_gpio_reg){ 0, 0x28 };
+ sc->regs[RK_GPIO_INT_STATUS] = (struct rk_gpio_reg){ 1, 0x50 };
+ sc->regs[RK_GPIO_INT_RAWSTATUS] = (struct rk_gpio_reg){ 1, 0x58 };
+ sc->regs[RK_GPIO_DEBOUNCE] = (struct rk_gpio_reg){ 0, 0x38 };
+ sc->regs[RK_GPIO_PORTA_EOI] = (struct rk_gpio_reg){ 0, 0x60 };
+ sc->regs[RK_GPIO_EXT_PORTA] = (struct rk_gpio_reg){ 1, 0x70 };
break;
default:
device_printf(dev, "Unknown gpio version %08x\n", sc->version);
@@ -394,7 +403,7 @@ rk_gpio_detach(device_t dev)
mtx_destroy(&sc->sc_mtx);
clk_disable(sc->clk);
- return(0);
+ return (0);
}
static device_t
@@ -471,7 +480,7 @@ rk_gpio_pin_getcaps(device_t dev, uint32_t pin, uint32_t *caps)
{
if (pin >= RK_GPIO_MAX_PINS)
- return EINVAL;
+ return (EINVAL);
*caps = RK_GPIO_DEFAULT_CAPS;
return (0);
@@ -654,46 +663,108 @@ rk_gpio_get_node(device_t bus, device_t dev)
}
static int
-rk_pic_map_intr(device_t dev, struct intr_map_data *data,
- struct intr_irqsrc **isrcp)
+rk_gpio_pic_map_fdt(struct rk_gpio_softc *sc,
+ struct intr_map_data_fdt *daf,
+ u_int *irqp, uint32_t *modep)
{
- struct rk_gpio_softc *sc = device_get_softc(dev);
- struct intr_map_data_gpio *gdata;
uint32_t irq;
+ uint32_t mode;
- if (data->type != INTR_MAP_DATA_GPIO) {
- device_printf(dev, "Wrong type\n");
- return (ENOTSUP);
- }
- gdata = (struct intr_map_data_gpio *)data;
- irq = gdata->gpio_pin_num;
+ if (daf->ncells != 2)
+ return (EINVAL);
+
+ irq = daf->cells[0];
+ if (irq >= RK_GPIO_MAX_PINS)
+ return (EINVAL);
+
+ /* Only reasonable modes are supported. */
+ if (daf->cells[1] == 1)
+ mode = GPIO_INTR_EDGE_RISING;
+ else if (daf->cells[1] == 2)
+ mode = GPIO_INTR_EDGE_FALLING;
+ else if (daf->cells[1] == 3)
+ mode = GPIO_INTR_EDGE_BOTH;
+ else if (daf->cells[1] == 4)
+ mode = GPIO_INTR_LEVEL_HIGH;
+ else if (daf->cells[1] == 8)
+ mode = GPIO_INTR_LEVEL_LOW;
+ else
+ return (EINVAL);
+
+ *irqp = irq;
+ if (modep != NULL)
+ *modep = mode;
+ return (0);
+}
+
+static int
+rk_gpio_pic_map_gpio(struct rk_gpio_softc *sc,
+ struct intr_map_data_gpio *dag,
+ u_int *irqp, uint32_t *modep)
+{
+ uint32_t irq;
+ irq = dag->gpio_pin_num;
if (irq >= RK_GPIO_MAX_PINS) {
- device_printf(dev, "Invalid interrupt %u\n", irq);
+ device_printf(sc->sc_dev, "Invalid interrupt %u\n",
+ irq);
return (EINVAL);
}
- *isrcp = RK_GPIO_ISRC(sc, irq);
+
+ *irqp = irq;
+ if (modep != NULL)
+ *modep = dag->gpio_intr_mode;
return (0);
}
static int
+rk_gpio_pic_map(struct rk_gpio_softc *sc, struct intr_map_data *data,
+ u_int *irqp, uint32_t *modep)
+{
+ switch (data->type) {
+ case INTR_MAP_DATA_FDT:
+ return (rk_gpio_pic_map_fdt(sc,
+ (struct intr_map_data_fdt *)data, irqp, modep));
+ case INTR_MAP_DATA_GPIO:
+ return (rk_gpio_pic_map_gpio(sc,
+ (struct intr_map_data_gpio *)data, irqp, modep));
+ default:
+ device_printf(sc->sc_dev, "Wrong type\n");
+ return (ENOTSUP);
+ }
+}
+
+static int
+rk_pic_map_intr(device_t dev, struct intr_map_data *data,
+ struct intr_irqsrc **isrcp)
+{
+ int error;
+ struct rk_gpio_softc *sc = device_get_softc(dev);
+ uint32_t irq;
+
+ error = rk_gpio_pic_map(sc, data, &irq, NULL);
+ if (error == 0)
+ *isrcp = RK_GPIO_ISRC(sc, irq);
+ return (error);
+}
+
+static int
rk_pic_setup_intr(device_t dev, struct intr_irqsrc *isrc,
struct resource *res, struct intr_map_data *data)
{
struct rk_gpio_softc *sc = device_get_softc(dev);
struct rk_pin_irqsrc *rkisrc = (struct rk_pin_irqsrc *)isrc;
- struct intr_map_data_gpio *gdata;
uint32_t mode;
- uint8_t pin;
+ uint32_t pin;
if (!data) {
device_printf(dev, "No map data\n");
return (ENOTSUP);
}
- gdata = (struct intr_map_data_gpio *)data;
- mode = gdata->gpio_intr_mode;
- pin = gdata->gpio_pin_num;
- if (rkisrc->irq != gdata->gpio_pin_num) {
+ if (rk_gpio_pic_map(sc, data, &pin, &mode) != 0)
+ return (EINVAL);
+
+ if (rkisrc->irq != pin) {
device_printf(dev, "Interrupts don't match\n");
return (EINVAL);
}
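
The rk_gpio rework keys the v1/v2 split off a per-register "single" flag rather than the controller version: v2 split registers hold data in bits [15:0] and a per-bit write-enable mask in bits [31:16], which is why rk_gpio_write_bit() sets bit (bit % 16) + 16 and rk_gpio_write_4() ORs in 0xffff0000. A sketch of toggling one pin on a v2 data register without a read-modify-write cycle:

/* Set pin 5: data bit 5 plus its write-enable bit. */
RK_GPIO_WRITE(sc, rk_reg->offset, (1 << 5) | (1 << (5 + 16)));
/* Clear pin 5: write-enable bit only, data bit left at 0. */
RK_GPIO_WRITE(sc, rk_reg->offset, 1 << (5 + 16));
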
diff --git a/sys/arm64/rockchip/rk_tsadc.c b/sys/arm64/rockchip/rk_tsadc.c
index e6cbad36f697..d83b09480a0c 100644
--- a/sys/arm64/rockchip/rk_tsadc.c
+++ b/sys/arm64/rockchip/rk_tsadc.c
@@ -484,7 +484,7 @@ tsadc_init_tsensor(struct tsadc_softc *sc, struct tsensor *sensor)
WR4(sc, TSADC_INT_EN, val);
/* Shutdown temperature */
- val = tsadc_raw_to_temp(sc, sc->shutdown_temp);
+ val = tsadc_temp_to_raw(sc, sc->shutdown_temp);
WR4(sc, TSADC_COMP_SHUT(sensor->channel), val);
val = RD4(sc, TSADC_AUTO_CON);
val |= TSADC_AUTO_SRC_EN(sensor->channel);
diff --git a/sys/arm64/vmm/arm64.h b/sys/arm64/vmm/arm64.h
index 6a0c4c78e568..82c4481b8692 100644
--- a/sys/arm64/vmm/arm64.h
+++ b/sys/arm64/vmm/arm64.h
@@ -125,6 +125,9 @@ struct hyp {
uint64_t vmid_generation;
uint64_t vttbr_el2;
uint64_t el2_addr; /* The address of this in el2 space */
+ uint64_t feats; /* Which features are enabled */
+#define HYP_FEAT_HCX (0x1ul << 0)
+#define HYP_FEAT_ECV_POFF (0x1ul << 1)
bool vgic_attached;
struct vgic_v3 *vgic;
struct hypctx *ctx[];
diff --git a/sys/arm64/vmm/hyp.h b/sys/arm64/vmm/hyp.h
index 0b2977c73960..0c8d0fb28b18 100644
--- a/sys/arm64/vmm/hyp.h
+++ b/sys/arm64/vmm/hyp.h
@@ -80,7 +80,6 @@
#define HYP_ENTER_GUEST 0x00000002
#define HYP_READ_REGISTER 0x00000003
#define HYP_REG_ICH_VTR 0x1
-#define HYP_REG_CNTHCTL 0x2
#define HYP_CLEAN_S2_TLBI 0x00000004
#define HYP_DC_CIVAC 0x00000005
#define HYP_EL2_TLBI 0x00000006
diff --git a/sys/arm64/vmm/io/vtimer.c b/sys/arm64/vmm/io/vtimer.c
index f59d7ebc1ad4..da0f0d96c431 100644
--- a/sys/arm64/vmm/io/vtimer.c
+++ b/sys/arm64/vmm/io/vtimer.c
@@ -36,6 +36,7 @@
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
+#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/timeet.h>
#include <sys/timetc.h>
@@ -55,11 +56,18 @@
#define timer_enabled(ctl) \
(!((ctl) & CNTP_CTL_IMASK) && ((ctl) & CNTP_CTL_ENABLE))
-static uint64_t cnthctl_el2_reg;
static uint32_t tmr_frq;
#define timer_condition_met(ctl) ((ctl) & CNTP_CTL_ISTATUS)
+SYSCTL_DECL(_hw_vmm);
+SYSCTL_NODE(_hw_vmm, OID_AUTO, vtimer, CTLFLAG_RW, NULL, NULL);
+
+static bool allow_ecv_phys = false;
+SYSCTL_BOOL(_hw_vmm_vtimer, OID_AUTO, allow_ecv_phys, CTLFLAG_RW,
+ &allow_ecv_phys, 0,
+ "Enable hardware access to the physical timer if FEAT_ECV_POFF is supported");
+
static void vtimer_schedule_irq(struct hypctx *hypctx, bool phys);
static int
@@ -111,9 +119,8 @@ out:
}
int
-vtimer_init(uint64_t cnthctl_el2)
+vtimer_init(void)
{
- cnthctl_el2_reg = cnthctl_el2;
/*
* The guest *MUST* use the same timer frequency as the host. The
* register CNTFRQ_EL0 is accessible to the guest and a different value
@@ -128,8 +135,12 @@ void
vtimer_vminit(struct hyp *hyp)
{
uint64_t now;
+ bool ecv_poff;
- hyp->vtimer.cnthctl_el2 = cnthctl_el2_reg;
+ ecv_poff = false;
+
+ if (allow_ecv_phys && (hyp->feats & HYP_FEAT_ECV_POFF) != 0)
+ ecv_poff = true;
/*
* Configure the Counter-timer Hypervisor Control Register for the VM.
@@ -137,35 +148,58 @@ vtimer_vminit(struct hyp *hyp)
if (in_vhe()) {
/*
* CNTHCTL_E2H_EL0PCTEN: trap EL0 access to CNTP{CT,CTSS}_EL0
- * CNTHCTL_E2H_EL1VCTEN: don't trap EL0 access to
- * CNTV{CT,CTSS}_EL0
+ * CNTHCTL_E2H_EL0VCTEN: don't trap EL0 access to
+ * CNTV{CT,CTSS}_EL0
* CNTHCTL_E2H_EL0VTEN: don't trap EL0 access to
* CNTV_{CTL,CVAL,TVAL}_EL0
* CNTHCTL_E2H_EL0PTEN: trap EL0 access to
* CNTP_{CTL,CVAL,TVAL}_EL0
- * CNTHCTL_E2H_EL1PCEN: trap EL1 access to
- CNTP_{CTL,CVAL,TVAL}_EL0
* CNTHCTL_E2H_EL1PCTEN: trap access to CNTPCT_EL0
+ * CNTHCTL_E2H_EL1PTEN: trap access to
+ * CNTP_{CTL,CVAL,TVAL}_EL0
+ * CNTHCTL_E2H_EL1VCTEN: don't trap EL0 access to
+ * CNTV{CT,CTSS}_EL0
+ * CNTHCTL_E2H_EL1PCEN: trap EL1 access to
+ * CNTP_{CTL,CVAL,TVAL}_EL0
*
* TODO: Don't trap when FEAT_ECV is present
*/
- hyp->vtimer.cnthctl_el2 &= ~CNTHCTL_E2H_EL0PCTEN;
- hyp->vtimer.cnthctl_el2 |= CNTHCTL_E2H_EL0VCTEN;
- hyp->vtimer.cnthctl_el2 |= CNTHCTL_E2H_EL0VTEN;
- hyp->vtimer.cnthctl_el2 &= ~CNTHCTL_E2H_EL0PTEN;
-
- hyp->vtimer.cnthctl_el2 &= ~CNTHCTL_E2H_EL1PTEN;
- hyp->vtimer.cnthctl_el2 &= ~CNTHCTL_E2H_EL1PCTEN;
+ hyp->vtimer.cnthctl_el2 =
+ CNTHCTL_E2H_EL0VCTEN_NOTRAP |
+ CNTHCTL_E2H_EL0VTEN_NOTRAP;
+ if (ecv_poff) {
+ hyp->vtimer.cnthctl_el2 |=
+ CNTHCTL_E2H_EL0PCTEN_NOTRAP |
+ CNTHCTL_E2H_EL0PTEN_NOTRAP |
+ CNTHCTL_E2H_EL1PCTEN_NOTRAP |
+ CNTHCTL_E2H_EL1PTEN_NOTRAP;
+ } else {
+ hyp->vtimer.cnthctl_el2 |=
+ CNTHCTL_E2H_EL0PCTEN_TRAP |
+ CNTHCTL_E2H_EL0PTEN_TRAP |
+ CNTHCTL_E2H_EL1PCTEN_TRAP |
+ CNTHCTL_E2H_EL1PTEN_TRAP;
+ }
} else {
/*
* CNTHCTL_EL1PCEN: trap access to CNTP_{CTL, CVAL, TVAL}_EL0
* from EL1
* CNTHCTL_EL1PCTEN: trap access to CNTPCT_EL0
*/
- hyp->vtimer.cnthctl_el2 &= ~CNTHCTL_EL1PCEN;
- hyp->vtimer.cnthctl_el2 &= ~CNTHCTL_EL1PCTEN;
+ if (ecv_poff) {
+ hyp->vtimer.cnthctl_el2 =
+ CNTHCTL_EL1PCTEN_NOTRAP |
+ CNTHCTL_EL1PCEN_NOTRAP;
+ } else {
+ hyp->vtimer.cnthctl_el2 =
+ CNTHCTL_EL1PCTEN_TRAP |
+ CNTHCTL_EL1PCEN_TRAP;
+ }
}
+ if (ecv_poff)
+ hyp->vtimer.cnthctl_el2 |= CNTHCTL_ECV_EN;
+
now = READ_SPECIALREG(cntpct_el0);
hyp->vtimer.cntvoff_el2 = now;
@@ -231,15 +265,10 @@ vtimer_cleanup(void)
{
}
-void
-vtimer_sync_hwstate(struct hypctx *hypctx)
+static void
+vtime_sync_timer(struct hypctx *hypctx, struct vtimer_timer *timer,
+ uint64_t cntpct_el0)
{
- struct vtimer_timer *timer;
- uint64_t cntpct_el0;
-
- timer = &hypctx->vtimer_cpu.virt_timer;
- cntpct_el0 = READ_SPECIALREG(cntpct_el0) -
- hypctx->hyp->vtimer.cntvoff_el2;
if (!timer_enabled(timer->cntx_ctl_el0)) {
vgic_inject_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
timer->irqid, false);
@@ -253,6 +282,21 @@ vtimer_sync_hwstate(struct hypctx *hypctx)
}
}
+void
+vtimer_sync_hwstate(struct hypctx *hypctx)
+{
+ uint64_t cntpct_el0;
+
+ cntpct_el0 = READ_SPECIALREG(cntpct_el0) -
+ hypctx->hyp->vtimer.cntvoff_el2;
+ vtime_sync_timer(hypctx, &hypctx->vtimer_cpu.virt_timer, cntpct_el0);
+ /* If FEAT_ECV_POFF is in use then we need to sync the physical timer */
+ if ((hypctx->hyp->vtimer.cnthctl_el2 & CNTHCTL_ECV_EN) != 0) {
+ vtime_sync_timer(hypctx, &hypctx->vtimer_cpu.phys_timer,
+ cntpct_el0);
+ }
+}
+
static void
vtimer_inject_irq_callout_phys(void *context)
{
diff --git a/sys/arm64/vmm/io/vtimer.h b/sys/arm64/vmm/io/vtimer.h
index 71a20344d05e..92ce025968d2 100644
--- a/sys/arm64/vmm/io/vtimer.h
+++ b/sys/arm64/vmm/io/vtimer.h
@@ -66,7 +66,7 @@ struct vtimer_cpu {
uint32_t cntkctl_el1;
};
-int vtimer_init(uint64_t cnthctl_el2);
+int vtimer_init(void);
void vtimer_vminit(struct hyp *);
void vtimer_cpuinit(struct hypctx *);
void vtimer_cpucleanup(struct hypctx *);
diff --git a/sys/arm64/vmm/vmm.c b/sys/arm64/vmm/vmm.c
index 3082d2941221..1dcefa1489e9 100644
--- a/sys/arm64/vmm/vmm.c
+++ b/sys/arm64/vmm/vmm.c
@@ -1342,8 +1342,14 @@ vm_handle_smccc_call(struct vcpu *vcpu, struct vm_exit *vme, bool *retu)
static int
vm_handle_wfi(struct vcpu *vcpu, struct vm_exit *vme, bool *retu)
{
+ struct vm *vm;
+
+ vm = vcpu->vm;
vcpu_lock(vcpu);
while (1) {
+ if (vm->suspend)
+ break;
+
if (vgic_has_pending_irq(vcpu->cookie))
break;
diff --git a/sys/arm64/vmm/vmm_arm64.c b/sys/arm64/vmm/vmm_arm64.c
index e293c99a6646..618f4afaf8ee 100644
--- a/sys/arm64/vmm/vmm_arm64.c
+++ b/sys/arm64/vmm/vmm_arm64.c
@@ -238,7 +238,6 @@ vmmops_modinit(int ipinum)
vm_offset_t next_hyp_va;
vm_paddr_t vmm_base;
uint64_t id_aa64mmfr0_el1, pa_range_bits, pa_range_field;
- uint64_t cnthctl_el2;
int cpu, i;
bool rv __diagused;
@@ -444,10 +443,9 @@ vmmops_modinit(int ipinum)
vmem_add(el2_mem_alloc, next_hyp_va,
HYP_VM_MAX_ADDRESS - next_hyp_va, M_WAITOK);
}
- cnthctl_el2 = vmm_read_reg(HYP_REG_CNTHCTL);
vgic_init();
- vtimer_init(cnthctl_el2);
+ vtimer_init();
return (0);
}
@@ -517,6 +515,7 @@ vmmops_init(struct vm *vm, pmap_t pmap)
{
struct hyp *hyp;
vm_size_t size;
+ uint64_t idreg;
size = el2_hyp_size(vm);
hyp = malloc_aligned(size, PAGE_SIZE, M_HYP, M_WAITOK | M_ZERO);
@@ -524,6 +523,16 @@ vmmops_init(struct vm *vm, pmap_t pmap)
hyp->vm = vm;
hyp->vgic_attached = false;
+ if (get_kernel_reg(ID_AA64MMFR0_EL1, &idreg)) {
+ if (ID_AA64MMFR0_ECV_VAL(idreg) >= ID_AA64MMFR0_ECV_POFF)
+ hyp->feats |= HYP_FEAT_ECV_POFF;
+ }
+
+ if (get_kernel_reg(ID_AA64MMFR1_EL1, &idreg)) {
+ if (ID_AA64MMFR1_HCX_VAL(idreg) >= ID_AA64MMFR1_HCX_IMPL)
+ hyp->feats |= HYP_FEAT_HCX;
+ }
+
vtimer_vminit(hyp);
vgic_vminit(hyp);
diff --git a/sys/arm64/vmm/vmm_hyp.c b/sys/arm64/vmm/vmm_hyp.c
index d61885c15871..345535318f6e 100644
--- a/sys/arm64/vmm/vmm_hyp.c
+++ b/sys/arm64/vmm/vmm_hyp.c
@@ -42,11 +42,11 @@ struct hypctx;
uint64_t VMM_HYP_FUNC(do_call_guest)(struct hypctx *);
static void
-vmm_hyp_reg_store(struct hypctx *hypctx, struct hyp *hyp, bool guest)
+vmm_hyp_reg_store(struct hypctx *hypctx, struct hyp *hyp, bool guest,
+ bool ecv_poff)
{
uint64_t dfr0;
- /* Store the guest VFP registers */
if (guest) {
/* Store the timer registers */
hypctx->vtimer_cpu.cntkctl_el1 =
@@ -55,7 +55,20 @@ vmm_hyp_reg_store(struct hypctx *hypctx, struct hyp *hyp, bool guest)
READ_SPECIALREG(EL0_REG(CNTV_CVAL));
hypctx->vtimer_cpu.virt_timer.cntx_ctl_el0 =
READ_SPECIALREG(EL0_REG(CNTV_CTL));
+ }
+ if (guest_or_nonvhe(guest) && ecv_poff) {
+ /*
+ * If we have ECV then the guest could modify these registers.
+ * If VHE is enabled then the kernel will see a different view
+ * of the registers, so doesn't need to handle them.
+ */
+ hypctx->vtimer_cpu.phys_timer.cntx_cval_el0 =
+ READ_SPECIALREG(EL0_REG(CNTP_CVAL));
+ hypctx->vtimer_cpu.phys_timer.cntx_ctl_el0 =
+ READ_SPECIALREG(EL0_REG(CNTP_CTL));
+ }
+ if (guest) {
/* Store the GICv3 registers */
hypctx->vgic_v3_regs.ich_eisr_el2 =
READ_SPECIALREG(ich_eisr_el2);
@@ -259,29 +272,20 @@ vmm_hyp_reg_store(struct hypctx *hypctx, struct hyp *hyp, bool guest)
hypctx->hcr_el2 = READ_SPECIALREG(hcr_el2);
hypctx->vpidr_el2 = READ_SPECIALREG(vpidr_el2);
hypctx->vmpidr_el2 = READ_SPECIALREG(vmpidr_el2);
-
-#ifndef VMM_VHE
- /* hcrx_el2 depends on feat_hcx */
- uint64_t mmfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
- if (ID_AA64MMFR1_HCX_VAL(mmfr1) >> ID_AA64MMFR1_HCX_SHIFT) {
- hypctx->hcrx_el2 = READ_SPECIALREG(MRS_REG_ALT_NAME(HCRX_EL2));
- }
-#endif
}
static void
-vmm_hyp_reg_restore(struct hypctx *hypctx, struct hyp *hyp, bool guest)
+vmm_hyp_reg_restore(struct hypctx *hypctx, struct hyp *hyp, bool guest,
+ bool ecv_poff)
{
uint64_t dfr0;
/* Restore the special registers */
WRITE_SPECIALREG(hcr_el2, hypctx->hcr_el2);
- if (guest_or_nonvhe(guest)) {
- uint64_t mmfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
- if (ID_AA64MMFR1_HCX_VAL(mmfr1) >> ID_AA64MMFR1_HCX_SHIFT) {
- WRITE_SPECIALREG(MRS_REG_ALT_NAME(HCRX_EL2), hypctx->hcrx_el2);
- }
+ if (guest) {
+ if ((hyp->feats & HYP_FEAT_HCX) != 0)
+ WRITE_SPECIALREG(HCRX_EL2_REG, hypctx->hcrx_el2);
}
isb();
@@ -450,6 +454,29 @@ vmm_hyp_reg_restore(struct hypctx *hypctx, struct hyp *hyp, bool guest)
WRITE_SPECIALREG(cnthctl_el2, hyp->vtimer.cnthctl_el2);
WRITE_SPECIALREG(cntvoff_el2, hyp->vtimer.cntvoff_el2);
+ if (ecv_poff) {
+ /*
+ * Load the same offset as the virtual timer
+ * to keep in sync.
+ */
+ WRITE_SPECIALREG(CNTPOFF_EL2_REG,
+ hyp->vtimer.cntvoff_el2);
+ isb();
+ }
+ }
+ if (guest_or_nonvhe(guest) && ecv_poff) {
+ /*
+ * If we have ECV then the guest could modify these registers.
+ * If VHE is enabled then the kernel will see a different view
+ * of the registers, so doesn't need to handle them.
+ */
+ WRITE_SPECIALREG(EL0_REG(CNTP_CVAL),
+ hypctx->vtimer_cpu.phys_timer.cntx_cval_el0);
+ WRITE_SPECIALREG(EL0_REG(CNTP_CTL),
+ hypctx->vtimer_cpu.phys_timer.cntx_ctl_el0);
+ }
+
+ if (guest) {
/* Load the GICv3 registers */
WRITE_SPECIALREG(ich_hcr_el2, hypctx->vgic_v3_regs.ich_hcr_el2);
WRITE_SPECIALREG(ich_vmcr_el2,
@@ -502,11 +529,19 @@ vmm_hyp_call_guest(struct hyp *hyp, struct hypctx *hypctx)
struct hypctx host_hypctx;
uint64_t cntvoff_el2;
uint64_t ich_hcr_el2, ich_vmcr_el2, cnthctl_el2, cntkctl_el1;
+#ifndef VMM_VHE
+ uint64_t hcrx_el2;
+#endif
uint64_t ret;
uint64_t s1e1r, hpfar_el2;
- bool hpfar_valid;
+ bool ecv_poff, hpfar_valid;
- vmm_hyp_reg_store(&host_hypctx, NULL, false);
+ ecv_poff = (hyp->vtimer.cnthctl_el2 & CNTHCTL_ECV_EN) != 0;
+ vmm_hyp_reg_store(&host_hypctx, NULL, false, ecv_poff);
+#ifndef VMM_VHE
+ if ((hyp->feats & HYP_FEAT_HCX) != 0)
+ hcrx_el2 = READ_SPECIALREG(MRS_REG_ALT_NAME(HCRX_EL2));
+#endif
/* Save the host special registers */
cnthctl_el2 = READ_SPECIALREG(cnthctl_el2);
@@ -516,7 +551,7 @@ vmm_hyp_call_guest(struct hyp *hyp, struct hypctx *hypctx)
ich_hcr_el2 = READ_SPECIALREG(ich_hcr_el2);
ich_vmcr_el2 = READ_SPECIALREG(ich_vmcr_el2);
- vmm_hyp_reg_restore(hypctx, hyp, true);
+ vmm_hyp_reg_restore(hypctx, hyp, true, ecv_poff);
/* Load the common hypervisor registers */
WRITE_SPECIALREG(vttbr_el2, hyp->vttbr_el2);
@@ -532,7 +567,7 @@ vmm_hyp_call_guest(struct hyp *hyp, struct hypctx *hypctx)
/* Store the exit info */
hypctx->exit_info.far_el2 = READ_SPECIALREG(far_el2);
- vmm_hyp_reg_store(hypctx, hyp, true);
+ vmm_hyp_reg_store(hypctx, hyp, true, ecv_poff);
hpfar_valid = true;
if (ret == EXCP_TYPE_EL1_SYNC) {
@@ -582,7 +617,12 @@ vmm_hyp_call_guest(struct hyp *hyp, struct hypctx *hypctx)
}
}
- vmm_hyp_reg_restore(&host_hypctx, NULL, false);
+ vmm_hyp_reg_restore(&host_hypctx, NULL, false, ecv_poff);
+
+#ifndef VMM_VHE
+ if ((hyp->feats & HYP_FEAT_HCX) != 0)
+ WRITE_SPECIALREG(MRS_REG_ALT_NAME(HCRX_EL2), hcrx_el2);
+#endif
/* Restore the host special registers */
WRITE_SPECIALREG(ich_hcr_el2, ich_hcr_el2);
@@ -613,8 +653,6 @@ VMM_HYP_FUNC(read_reg)(uint64_t reg)
switch (reg) {
case HYP_REG_ICH_VTR:
return (READ_SPECIALREG(ich_vtr_el2));
- case HYP_REG_CNTHCTL:
- return (READ_SPECIALREG(cnthctl_el2));
}
return (0);