Diffstat (limited to 'sys/arm64/include')
-rw-r--r--  sys/arm64/include/_armreg.h      57
-rw-r--r--  sys/arm64/include/armreg.h       73
-rw-r--r--  sys/arm64/include/cpu.h          32
-rw-r--r--  sys/arm64/include/cpu_feat.h     52
-rw-r--r--  sys/arm64/include/cpufunc.h       7
-rw-r--r--  sys/arm64/include/db_machdep.h    1
-rw-r--r--  sys/arm64/include/hypervisor.h  177
-rw-r--r--  sys/arm64/include/kexec.h        33
-rw-r--r--  sys/arm64/include/pcpu.h          3
-rw-r--r--  sys/arm64/include/pmap.h          3
-rw-r--r--  sys/arm64/include/proc.h          1
-rw-r--r--  sys/arm64/include/smp.h           1
-rw-r--r--  sys/arm64/include/vmm.h          56
-rw-r--r--  sys/arm64/include/vmm_dev.h       2
14 files changed, 391 insertions(+), 107 deletions(-)
diff --git a/sys/arm64/include/_armreg.h b/sys/arm64/include/_armreg.h
new file mode 100644
index 000000000000..0f5134e5a978
--- /dev/null
+++ b/sys/arm64/include/_armreg.h
@@ -0,0 +1,57 @@
+/*-
+ * Copyright (c) 2013, 2014 Andrew Turner
+ * Copyright (c) 2015,2021 The FreeBSD Foundation
+ *
+ * Portions of this software were developed by Andrew Turner
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if !defined(_MACHINE_ARMREG_H_) && \
+ !defined(_MACHINE_CPU_H_) && \
+ !defined(_MACHINE_HYPERVISOR_H_)
+#error Do not include this file directly
+#endif
+
+#ifndef _MACHINE__ARMREG_H_
+#define _MACHINE__ARMREG_H_
+
+#define __MRS_REG_ALT_NAME(op0, op1, crn, crm, op2) \
+ S##op0##_##op1##_C##crn##_C##crm##_##op2
+#define _MRS_REG_ALT_NAME(op0, op1, crn, crm, op2) \
+ __MRS_REG_ALT_NAME(op0, op1, crn, crm, op2)
+#define MRS_REG_ALT_NAME(reg) \
+ _MRS_REG_ALT_NAME(reg##_op0, reg##_op1, reg##_CRn, reg##_CRm, reg##_op2)
+
+
+#define READ_SPECIALREG(reg) \
+({ uint64_t _val; \
+ __asm __volatile("mrs %0, " __STRING(reg) : "=&r" (_val)); \
+ _val; \
+})
+#define WRITE_SPECIALREG(reg, _val) \
+ __asm __volatile("msr " __STRING(reg) ", %0" : : "r"((uint64_t)_val))
+
+#define UL(x) UINT64_C(x)
+
+#endif /* !_MACHINE__ARMREG_H_ */
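
The two halves of this new header do different jobs: MRS_REG_ALT_NAME() pastes a register's *_op0/_op1/_CRn/_CRm/_op2 defines into the generic S<op0>_<op1>_C<crn>_C<crm>_<op2> spelling that every assembler accepts, and READ_SPECIALREG()/WRITE_SPECIALREG() wrap the mrs/msr instructions around either spelling. A minimal usage sketch (illustrative only, not part of the commit):

    static void
    specialreg_example(void)
    {
            uint64_t midr, cval;

            /* Registers the assembler knows by name can be used directly. */
            midr = READ_SPECIALREG(midr_el1);

            /*
             * Registers it may not know go through the alternative name:
             * MRS_REG_ALT_NAME(CNTP_CVAL_EL0) expands to S3_3_C14_C2_2
             * from the CNTP_CVAL_EL0_op0..op2 defines in armreg.h.
             */
            cval = READ_SPECIALREG(MRS_REG_ALT_NAME(CNTP_CVAL_EL0));
            WRITE_SPECIALREG(MRS_REG_ALT_NAME(CNTP_CVAL_EL0), cval);
            (void)midr;
    }
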
diff --git a/sys/arm64/include/armreg.h b/sys/arm64/include/armreg.h
index 500f35c48787..aa9b672ad85a 100644
--- a/sys/arm64/include/armreg.h
+++ b/sys/arm64/include/armreg.h
@@ -34,25 +34,9 @@
#ifndef _MACHINE_ARMREG_H_
#define _MACHINE_ARMREG_H_
-#define INSN_SIZE 4
-
-#define __MRS_REG_ALT_NAME(op0, op1, crn, crm, op2) \
- S##op0##_##op1##_C##crn##_C##crm##_##op2
-#define _MRS_REG_ALT_NAME(op0, op1, crn, crm, op2) \
- __MRS_REG_ALT_NAME(op0, op1, crn, crm, op2)
-#define MRS_REG_ALT_NAME(reg) \
- _MRS_REG_ALT_NAME(reg##_op0, reg##_op1, reg##_CRn, reg##_CRm, reg##_op2)
-
+#include <machine/_armreg.h>
-#define READ_SPECIALREG(reg) \
-({ uint64_t _val; \
- __asm __volatile("mrs %0, " __STRING(reg) : "=&r" (_val)); \
- _val; \
-})
-#define WRITE_SPECIALREG(reg, _val) \
- __asm __volatile("msr " __STRING(reg) ", %0" : : "r"((uint64_t)_val))
-
-#define UL(x) UINT64_C(x)
+#define INSN_SIZE 4
/* AFSR0_EL1 - Auxiliary Fault Status Register 0 */
#define AFSR0_EL1_REG MRS_REG_ALT_NAME(AFSR0_EL1)
@@ -232,6 +216,14 @@
#define CNTP_CTL_IMASK (1 << 1)
#define CNTP_CTL_ISTATUS (1 << 2)
+/* CNTP_CTL_EL02 - Counter-timer Physical Timer Control register */
+#define CNTP_CTL_EL02_REG MRS_REG_ALT_NAME(CNTP_CTL_EL02)
+#define CNTP_CTL_EL02_op0 3
+#define CNTP_CTL_EL02_op1 5
+#define CNTP_CTL_EL02_CRn 14
+#define CNTP_CTL_EL02_CRm 2
+#define CNTP_CTL_EL02_op2 1
+
/* CNTP_CVAL_EL0 - Counter-timer Physical Timer CompareValue register */
#define CNTP_CVAL_EL0_op0 3
#define CNTP_CVAL_EL0_op1 3
@@ -239,6 +231,14 @@
#define CNTP_CVAL_EL0_CRm 2
#define CNTP_CVAL_EL0_op2 2
+/* CNTP_CVAL_EL02 - Counter-timer Physical Timer CompareValue register */
+#define CNTP_CVAL_EL02_REG MRS_REG_ALT_NAME(CNTP_CVAL_EL02)
+#define CNTP_CVAL_EL02_op0 3
+#define CNTP_CVAL_EL02_op1 5
+#define CNTP_CVAL_EL02_CRn 14
+#define CNTP_CVAL_EL02_CRm 2
+#define CNTP_CVAL_EL02_op2 2
+
/* CNTP_TVAL_EL0 - Counter-timer Physical Timer TimerValue register */
#define CNTP_TVAL_EL0_op0 3
#define CNTP_TVAL_EL0_op1 3
@@ -254,6 +254,14 @@
#define CNTPCT_EL0_CRm 0
#define CNTPCT_EL0_op2 1
+/* CNTPCTSS_EL0 - Counter-timer Self-Synchronized Physical Count register */
+#define CNTPCTSS_EL0_REG MRS_REG_ALT_NAME(CNTPCTSS_EL0)
+#define CNTPCTSS_EL0_op0 3
+#define CNTPCTSS_EL0_op1 3
+#define CNTPCTSS_EL0_CRn 14
+#define CNTPCTSS_EL0_CRm 0
+#define CNTPCTSS_EL0_op2 5
+
/* CNTV_CTL_EL0 - Counter-timer Virtual Timer Control register */
#define CNTV_CTL_EL0_op0 3
#define CNTV_CTL_EL0_op1 3
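
CNTPCTSS_EL0 is the FEAT_ECV self-synchronized view of the physical counter: reads of it do not need the barrier-based ordering that plain CNTPCT_EL0 reads may require. A hedged sketch of picking between the two at run time (the helper and the raw field extraction are illustrative; FEAT_ECV is advertised in ID_AA64MMFR0_EL1.ECV, bits [63:60]):

    static uint64_t
    read_phys_count(void)
    {
            uint64_t mmfr0;

            mmfr0 = READ_SPECIALREG(id_aa64mmfr0_el1);
            if (((mmfr0 >> 60) & 0xf) != 0)
                    /* Self-synchronized counter, no barrier needed. */
                    return (READ_SPECIALREG(CNTPCTSS_EL0_REG));

            /* Fall back to the classic counter; the caller handles ordering. */
            return (READ_SPECIALREG(cntpct_el0));
    }

In practice the FEAT_ECV decision would be made once at boot rather than on every read; the branch is inlined here only to keep the sketch self-contained.
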
@@ -282,6 +290,14 @@
#define CNTV_CVAL_EL02_CRm 3
#define CNTV_CVAL_EL02_op2 2
+/* CNTVCTSS_EL0 - Counter-timer Self-Synchronized Virtual Count register */
+#define CNTVCTSS_EL0_REG MRS_REG_ALT_NAME(CNTVCTSS_EL0)
+#define CNTVCTSS_EL0_op0 3
+#define CNTVCTSS_EL0_op1 3
+#define CNTVCTSS_EL0_CRn 14
+#define CNTVCTSS_EL0_CRm 0
+#define CNTVCTSS_EL0_op2 6
+
/* CONTEXTIDR_EL1 - Context ID register */
#define CONTEXTIDR_EL1_REG MRS_REG_ALT_NAME(CONTEXTIDR_EL1)
#define CONTEXTIDR_EL1_op0 3
@@ -2148,6 +2164,7 @@
#define OSLAR_EL1_CRn 1
#define OSLAR_EL1_CRm 0
#define OSLAR_EL1_op2 4
+#define OSLAR_OSLK (0x1ul << 0)
/* OSLSR_EL1 */
#define OSLSR_EL1_op0 2
@@ -2155,6 +2172,10 @@
#define OSLSR_EL1_CRn 1
#define OSLSR_EL1_CRm 1
#define OSLSR_EL1_op2 4
+#define OSLSR_OSLM_1 (0x1ul << 3)
+#define OSLSR_nTT (0x1ul << 2)
+#define OSLSR_OSLK (0x1ul << 1)
+#define OSLSR_OSLM_0 (0x1ul << 0)
/* PAR_EL1 - Physical Address Register */
#define PAR_F_SHIFT 0
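
OSLSR_EL1.OSLK reports whether the OS Lock is set (locking out the debug registers), and OSLAR_EL1.OSLK is the write-only control that sets or clears it. A small illustrative sketch using the new bit definitions (the helper name is made up):

    static void
    os_lock_release(void)
    {
            /* If the OS Lock is held, debug register writes have no effect. */
            if ((READ_SPECIALREG(oslsr_el1) & OSLSR_OSLK) != 0) {
                    /* Writing OSLAR_EL1 with OSLK clear releases the lock. */
                    WRITE_SPECIALREG(oslar_el1, 0);
                    __asm __volatile("isb");
            }
    }
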
@@ -2241,6 +2262,11 @@
#define PMBSR_DL (UL(0x1) << PMBSR_DL_SHIFT)
#define PMBSR_EC_SHIFT 26
#define PMBSR_EC_MASK (UL(0x3f) << PMBSR_EC_SHIFT)
+#define PMBSR_EC_VAL(x) (((x) & PMBSR_EC_MASK) >> PMBSR_EC_SHIFT)
+#define PMBSR_EC_OTHER_BUF_MGMT 0x00
+#define PMBSR_EC_GRAN_PROT_CHK 0x1e
+#define PMBSR_EC_STAGE1_DA 0x24
+#define PMBSR_EC_STAGE2_DA 0x25
/* PMCCFILTR_EL0 */
#define PMCCFILTR_EL0_op0 3
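
PMBSR_EC_VAL() pulls the event class out of the Statistical Profiling buffer status/syndrome register, which is what a Profiling Buffer management interrupt handler switches on to tell a translation fault from an ordinary buffer event. Illustrative decoder (the function itself is made up):

    static const char *
    pmbsr_event_class(uint64_t pmbsr)
    {
            switch (PMBSR_EC_VAL(pmbsr)) {
            case PMBSR_EC_OTHER_BUF_MGMT:
                    return ("buffer management event");
            case PMBSR_EC_GRAN_PROT_CHK:
                    return ("granule protection check fault");
            case PMBSR_EC_STAGE1_DA:
                    return ("stage 1 data abort");
            case PMBSR_EC_STAGE2_DA:
                    return ("stage 2 data abort");
            default:
                    return ("unknown event class");
            }
    }
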
@@ -2476,6 +2502,15 @@
#define PMSIDR_FnE (UL(0x1) << PMSIDR_FnE_SHIFT)
#define PMSIDR_Interval_SHIFT 8
#define PMSIDR_Interval_MASK (UL(0xf) << PMSIDR_Interval_SHIFT)
+#define PMSIDR_Interval_VAL(x) (((x) & PMSIDR_Interval_MASK) >> PMSIDR_Interval_SHIFT)
+#define PMSIDR_Interval_256 0
+#define PMSIDR_Interval_512 2
+#define PMSIDR_Interval_768 3
+#define PMSIDR_Interval_1024 4
+#define PMSIDR_Interval_1536 5
+#define PMSIDR_Interval_2048 6
+#define PMSIDR_Interval_3072 7
+#define PMSIDR_Interval_4096 8
#define PMSIDR_MaxSize_SHIFT 12
#define PMSIDR_MaxSize_MASK (UL(0xf) << PMSIDR_MaxSize_SHIFT)
#define PMSIDR_CountSize_SHIFT 16
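
PMSIDR_EL1.Interval is an encoded minimum for the SPE sampling interval; the new PMSIDR_Interval_VAL() macro and encodings let a driver clamp what it programs into PMSIRR_EL1. Illustrative translation of the encoding into a count (values follow the defines above; the function itself is made up):

    static u_int
    spe_min_interval(uint64_t pmsidr)
    {
            switch (PMSIDR_Interval_VAL(pmsidr)) {
            case PMSIDR_Interval_256:
                    return (256);
            case PMSIDR_Interval_512:
                    return (512);
            case PMSIDR_Interval_768:
                    return (768);
            case PMSIDR_Interval_1024:
                    return (1024);
            case PMSIDR_Interval_1536:
                    return (1536);
            case PMSIDR_Interval_2048:
                    return (2048);
            case PMSIDR_Interval_3072:
                    return (3072);
            case PMSIDR_Interval_4096:
                    return (4096);
            default:
                    /* Reserved encoding: be conservative. */
                    return (4096);
            }
    }
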
@@ -2612,10 +2647,12 @@
(SCTLR_LSMAOE | SCTLR_nTLSMD | SCTLR_EIS | SCTLR_TSCXT | SCTLR_EOS)
#define SCTLR_MMU_ON \
(SCTLR_MMU_OFF | \
+ SCTLR_EPAN | \
SCTLR_BT1 | \
SCTLR_BT0 | \
SCTLR_UCI | \
SCTLR_SPAN | \
+ SCTLR_IESB | \
SCTLR_nTWE | \
SCTLR_nTWI | \
SCTLR_UCT | \
diff --git a/sys/arm64/include/cpu.h b/sys/arm64/include/cpu.h
index 59cda36f275e..b15210633d37 100644
--- a/sys/arm64/include/cpu.h
+++ b/sys/arm64/include/cpu.h
@@ -43,10 +43,10 @@
#define _MACHINE_CPU_H_
#if !defined(__ASSEMBLER__)
+#include <machine/_armreg.h>
#include <machine/atomic.h>
#include <machine/frame.h>
#endif
-#include <machine/armreg.h>
#define TRAPF_PC(tfp) ((tfp)->tf_elr)
#define TRAPF_USERMODE(tfp) (((tfp)->tf_spsr & PSR_M_MASK) == PSR_M_EL0t)
@@ -125,7 +125,11 @@
#define CPU_PART_NEOVERSE_V3 0xD84
#define CPU_PART_CORTEX_X925 0xD85
#define CPU_PART_CORTEX_A725 0xD87
+#define CPU_PART_C1_NANO 0xD8A
+#define CPU_PART_C1_PRO 0xD8B
+#define CPU_PART_C1_ULTRA 0xD8C
#define CPU_PART_NEOVERSE_N3 0xD8E
+#define CPU_PART_C1_PREMIUM 0xD90
/* Cavium Part numbers */
#define CPU_PART_THUNDERX 0x0A1
@@ -193,8 +197,30 @@
(((mask) & PCPU_GET(midr)) == \
((mask) & CPU_ID_RAW((impl), (part), (var), (rev))))
-#define CPU_MATCH_RAW(mask, devid) \
- (((mask) & PCPU_GET(midr)) == ((mask) & (devid)))
+#if !defined(__ASSEMBLER__)
+static inline bool
+midr_check_var_part_range(u_int midr, u_int impl, u_int part, u_int var_low,
+ u_int part_low, u_int var_high, u_int part_high)
+{
+ /* Check for the correct part */
+ if (CPU_IMPL(midr) != impl || CPU_PART(midr) != part)
+ return (false);
+
+ /* Check if the variant is between var_low and var_high inclusive */
+ if (CPU_VAR(midr) < var_low || CPU_VAR(midr) > var_high)
+ return (false);
+
+ /* If the variant is the low value, check if the revision is high enough */
+ if (CPU_VAR(midr) == var_low && CPU_REV(midr) < part_low)
+ return (false);
+
+ /* If the variant is the high value, check if the revision is low enough */
+ if (CPU_VAR(midr) == var_high && CPU_REV(midr) > part_high)
+ return (false);
+
+ return (true);
+}
+#endif
/*
* Chip-specific errata. These defines are intended to be
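
midr_check_var_part_range() is added where the CPU_MATCH_RAW() macro is removed and covers the common "affected from rNpM through rNpM" erratum pattern. A hedged usage sketch (the erratum and the revision window are invented; a real caller would pass PCPU_GET(midr) or the value from get_midr()):

    static bool
    cpu_has_hypothetical_erratum(u_int midr)
    {
            /* Affected parts: Cortex-A725 r0p0 through r1p2 (made up). */
            return (midr_check_var_part_range(midr, CPU_IMPL_ARM,
                CPU_PART_CORTEX_A725, 0, 0, 1, 2));
    }
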
diff --git a/sys/arm64/include/cpu_feat.h b/sys/arm64/include/cpu_feat.h
index 9fe6a9dd95d9..6a311d4000bb 100644
--- a/sys/arm64/include/cpu_feat.h
+++ b/sys/arm64/include/cpu_feat.h
@@ -29,6 +29,7 @@
#define _MACHINE_CPU_FEAT_H_
#include <sys/linker_set.h>
+#include <sys/sysctl.h>
typedef enum {
ERRATA_UNKNOWN, /* Unknown erratum */
@@ -39,6 +40,31 @@ typedef enum {
/* kernel component. */
} cpu_feat_errata;
+typedef enum {
+ /*
+ * Don't implement the feature or erratum workaround,
+ * e.g. the feature is not implemented or the erratum is
+ * for another CPU.
+ */
+ FEAT_ALWAYS_DISABLE,
+
+ /*
+ * Disabled by default, but allow the user to enable,
+ * e.g. for a rare erratum with a workaround, Arm
+ * Category B (rare) or similar.
+ */
+ FEAT_DEFAULT_DISABLE,
+
+ /*
+ * Enabled by default, but allow the user to disable,
+ * e.g. for a common erratum with a workaround, Arm
+ * Category A or B or similar.
+ */
+ FEAT_DEFAULT_ENABLE,
+
+ /* We could add FEAT_ALWAYS_ENABLE if a need was found. */
+} cpu_feat_en;
+
#define CPU_FEAT_STAGE_MASK 0x00000001
#define CPU_FEAT_EARLY_BOOT 0x00000000
#define CPU_FEAT_AFTER_DEV 0x00000001
@@ -47,23 +73,45 @@ typedef enum {
#define CPU_FEAT_PER_CPU 0x00000000
#define CPU_FEAT_SYSTEM 0x00000010
+#define CPU_FEAT_USER_ENABLED 0x40000000
+#define CPU_FEAT_USER_DISABLED 0x80000000
+
struct cpu_feat;
-typedef bool (cpu_feat_check)(const struct cpu_feat *, u_int);
+typedef cpu_feat_en (cpu_feat_check)(const struct cpu_feat *, u_int);
typedef bool (cpu_feat_has_errata)(const struct cpu_feat *, u_int,
u_int **, u_int *);
-typedef void (cpu_feat_enable)(const struct cpu_feat *, cpu_feat_errata,
+typedef bool (cpu_feat_enable)(const struct cpu_feat *, cpu_feat_errata,
u_int *, u_int);
+typedef void (cpu_feat_disabled)(const struct cpu_feat *);
struct cpu_feat {
const char *feat_name;
cpu_feat_check *feat_check;
cpu_feat_has_errata *feat_has_errata;
cpu_feat_enable *feat_enable;
+ cpu_feat_disabled *feat_disabled;
uint32_t feat_flags;
+ bool feat_enabled;
};
SET_DECLARE(cpu_feat_set, struct cpu_feat);
+SYSCTL_DECL(_hw_feat);
+
+#define CPU_FEAT(name, descr, check, has_errata, enable, disabled, flags) \
+static struct cpu_feat name = { \
+ .feat_name = #name, \
+ .feat_check = check, \
+ .feat_has_errata = has_errata, \
+ .feat_enable = enable, \
+ .feat_disabled = disabled, \
+ .feat_flags = flags, \
+ .feat_enabled = false, \
+}; \
+DATA_SET(cpu_feat_set, name); \
+SYSCTL_BOOL(_hw_feat, OID_AUTO, name, CTLFLAG_RD, &name.feat_enabled, \
+ 0, descr)
+
/*
* Allow drivers to mark an erratum as worked around, e.g. the Errata
* Management ABI may know the workaround isn't needed on a given system.
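
The reworked interface gives each feature a policy (cpu_feat_en) from its check callback, a bool result from its enable callback, an optional disabled callback, and a read-only hw.feat.<name> sysctl generated by CPU_FEAT(). A hedged sketch of a registration in the new shape (all names and bodies below are invented):

    static cpu_feat_en
    feat_example_check(const struct cpu_feat *feat __unused, u_int midr __unused)
    {
            /* A real check would parse an ID register or the MIDR here. */
            return (FEAT_DEFAULT_ENABLE);
    }

    static bool
    feat_example_enable(const struct cpu_feat *feat __unused,
        cpu_feat_errata errata_status __unused, u_int *errata_list __unused,
        u_int errata_count __unused)
    {
            /* Flip the relevant control bits; report whether it took effect. */
            return (true);
    }

    CPU_FEAT(feat_example, "Illustrative CPU feature",
        feat_example_check, NULL, feat_example_enable, NULL,
        CPU_FEAT_EARLY_BOOT | CPU_FEAT_PER_CPU);

The feat_enabled flag backing the sysctl is presumably filled in by the framework from the enable callback's return value, so hw.feat.* reports what actually took effect on the running system.
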
diff --git a/sys/arm64/include/cpufunc.h b/sys/arm64/include/cpufunc.h
index e6e1f682794e..e9eee643216b 100644
--- a/sys/arm64/include/cpufunc.h
+++ b/sys/arm64/include/cpufunc.h
@@ -96,6 +96,13 @@ serror_enable(void)
__asm __volatile("msr daifclr, #(" __XSTRING(DAIF_A) ")");
}
+static __inline void
+serror_disable(void)
+{
+
+ __asm __volatile("msr daifset, #(" __XSTRING(DAIF_A) ")");
+}
+
static __inline register_t
get_midr(void)
{
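
serror_disable() is the counterpart to the existing serror_enable(): it sets PSTATE.A (via daifset) so asynchronous SError exceptions stay pending rather than being delivered. An illustrative pairing (the surrounding function is made up):

    static void
    do_sensitive_sequence(void)
    {
            serror_disable();       /* hold SErrors pending */
            /* ... code that must not be interrupted by an SError ... */
            serror_enable();        /* allow pending SErrors to be delivered */
    }
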
diff --git a/sys/arm64/include/db_machdep.h b/sys/arm64/include/db_machdep.h
index 5dc496ca851d..3ef95f7802ea 100644
--- a/sys/arm64/include/db_machdep.h
+++ b/sys/arm64/include/db_machdep.h
@@ -31,7 +31,6 @@
#ifndef _MACHINE_DB_MACHDEP_H_
#define _MACHINE_DB_MACHDEP_H_
-#include <machine/armreg.h>
#include <machine/frame.h>
#include <machine/trap.h>
diff --git a/sys/arm64/include/hypervisor.h b/sys/arm64/include/hypervisor.h
index e3a880afbe9c..7d405e63cd8d 100644
--- a/sys/arm64/include/hypervisor.h
+++ b/sys/arm64/include/hypervisor.h
@@ -30,26 +30,85 @@
#ifndef _MACHINE_HYPERVISOR_H_
#define _MACHINE_HYPERVISOR_H_
+#include <machine/_armreg.h>
+
/*
* These registers are only useful when in hypervisor context,
* e.g. specific to EL2, or controlling the hypervisor.
*/
/* CNTHCTL_EL2 - Counter-timer Hypervisor Control register */
-#define CNTHCTL_EVNTI_MASK (0xf << 4) /* Bit to trigger event stream */
/* Valid if HCR_EL2.E2H == 0 */
-#define CNTHCTL_EL1PCTEN (1 << 0) /* Allow physical counter access */
-#define CNTHCTL_EL1PCEN (1 << 1) /* Allow physical timer access */
+#define CNTHCTL_EL1PCTEN_SHIFT 0
+#define CNTHCTL_EL1PCTEN_MASK (0x1ul << CNTHCTL_EL1PCTEN_SHIFT)
+#define CNTHCTL_EL1PCTEN_TRAP (0x0ul << CNTHCTL_EL1PCTEN_SHIFT)
+#define CNTHCTL_EL1PCTEN_NOTRAP (0x1ul << CNTHCTL_EL1PCTEN_SHIFT)
+#define CNTHCTL_EL1PCEN_SHIFT 1
+#define CNTHCTL_EL1PCEN_MASK (0x1ul << CNTHCTL_EL1PCEN_SHIFT)
+#define CNTHCTL_EL1PCEN_TRAP (0x0ul << CNTHCTL_EL1PCEN_SHIFT)
+#define CNTHCTL_EL1PCEN_NOTRAP (0x1ul << CNTHCTL_EL1PCEN_SHIFT)
/* Valid if HCR_EL2.E2H == 1 */
-#define CNTHCTL_E2H_EL0PCTEN (1 << 0) /* Allow EL0 physical counter access */
-#define CNTHCTL_E2H_EL0VCTEN (1 << 1) /* Allow EL0 virtual counter access */
-#define CNTHCTL_E2H_EL0VTEN (1 << 8)
-#define CNTHCTL_E2H_EL0PTEN (1 << 9)
-#define CNTHCTL_E2H_EL1PCTEN (1 << 10) /* Allow physical counter access */
-#define CNTHCTL_E2H_EL1PTEN (1 << 11) /* Allow physical timer access */
+#define CNTHCTL_E2H_EL0PCTEN_SHIFT 0
+#define CNTHCTL_E2H_EL0PCTEN_MASK (0x1ul << CNTHCTL_E2H_EL0PCTEN_SHIFT)
+#define CNTHCTL_E2H_EL0PCTEN_TRAP (0x0ul << CNTHCTL_E2H_EL0PCTEN_SHIFT)
+#define CNTHCTL_E2H_EL0PCTEN_NOTRAP (0x1ul << CNTHCTL_E2H_EL0PCTEN_SHIFT)
+#define CNTHCTL_E2H_EL0VCTEN_SHIFT 1
+#define CNTHCTL_E2H_EL0VCTEN_MASK (0x1ul << CNTHCTL_E2H_EL0VCTEN_SHIFT)
+#define CNTHCTL_E2H_EL0VCTEN_TRAP (0x0ul << CNTHCTL_E2H_EL0VCTEN_SHIFT)
+#define CNTHCTL_E2H_EL0VCTEN_NOTRAP (0x1ul << CNTHCTL_E2H_EL0VCTEN_SHIFT)
+#define CNTHCTL_E2H_EL0VTEN_SHIFT 8
+#define CNTHCTL_E2H_EL0VTEN_MASK (0x1ul << CNTHCTL_E2H_EL0VTEN_SHIFT)
+#define CNTHCTL_E2H_EL0VTEN_TRAP (0x0ul << CNTHCTL_E2H_EL0VTEN_SHIFT)
+#define CNTHCTL_E2H_EL0VTEN_NOTRAP (0x1ul << CNTHCTL_E2H_EL0VTEN_SHIFT)
+#define CNTHCTL_E2H_EL0PTEN_SHIFT 9
+#define CNTHCTL_E2H_EL0PTEN_MASK (0x1ul << CNTHCTL_E2H_EL0PTEN_SHIFT)
+#define CNTHCTL_E2H_EL0PTEN_TRAP (0x0ul << CNTHCTL_E2H_EL0PTEN_SHIFT)
+#define CNTHCTL_E2H_EL0PTEN_NOTRAP (0x1ul << CNTHCTL_E2H_EL0PTEN_SHIFT)
+#define CNTHCTL_E2H_EL1PCTEN_SHIFT 10
+#define CNTHCTL_E2H_EL1PCTEN_MASK (0x1ul << CNTHCTL_E2H_EL1PCTEN_SHIFT)
+#define CNTHCTL_E2H_EL1PCTEN_TRAP (0x0ul << CNTHCTL_E2H_EL1PCTEN_SHIFT)
+#define CNTHCTL_E2H_EL1PCTEN_NOTRAP (0x1ul << CNTHCTL_E2H_EL1PCTEN_SHIFT)
+#define CNTHCTL_E2H_EL1PTEN_SHIFT 11
+#define CNTHCTL_E2H_EL1PTEN_MASK (0x1ul << CNTHCTL_E2H_EL1PTEN_SHIFT)
+#define CNTHCTL_E2H_EL1PTEN_TRAP (0x0ul << CNTHCTL_E2H_EL1PTEN_SHIFT)
+#define CNTHCTL_E2H_EL1PTEN_NOTRAP (0x1ul << CNTHCTL_E2H_EL1PTEN_SHIFT)
/* Unconditionally valid */
-#define CNTHCTL_EVNTDIR (1 << 3) /* Control transition trigger bit */
-#define CNTHCTL_EVNTEN (1 << 2) /* Enable event stream */
+#define CNTHCTL_EVNTEN_SHIFT 2
+#define CNTHCTL_EVNTEN_MASK (0x1ul << CNTHCTL_EVNTEN_SHIFT)
+#define CNTHCTL_EVNTEN_DIS (0x0ul << CNTHCTL_EVNTEN_SHIFT)
+#define CNTHCTL_EVNTEN_EN (0x1ul << CNTHCTL_EVNTEN_SHIFT)
+#define CNTHCTL_EVNTDIR_SHIFT 3
+#define CNTHCTL_EVNTDIR_MASK (0x1ul << CNTHCTL_EVNTDIR_SHIFT)
+#define CNTHCTL_EVNTDIR_HIGH (0x0ul << CNTHCTL_EVNTDIR_SHIFT)
+#define CNTHCTL_EVNTDIR_LOW (0x1ul << CNTHCTL_EVNTDIR_SHIFT)
+#define CNTHCTL_EVNTI_SHIFT 4
+#define CNTHCTL_EVNTI_MASK (0xful << CNTHCTL_EVNTI_SHIFT)
+#define CNTHCTL_ECV_SHIFT 12
+#define CNTHCTL_ECV_MASK (0x1ul << CNTHCTL_ECV_SHIFT)
+#define CNTHCTL_ECV_DIS (0x0ul << CNTHCTL_ECV_SHIFT)
+#define CNTHCTL_ECV_EN (0x1ul << CNTHCTL_ECV_SHIFT)
+#define CNTHCTL_EL1TVT_SHIFT 13
+#define CNTHCTL_EL1TVT_MASK (0x1ul << CNTHCTL_EL1TVT_SHIFT)
+#define CNTHCTL_EL1TVT_NOTRAP (0x0ul << CNTHCTL_EL1TVT_SHIFT)
+#define CNTHCTL_EL1TVT_TRAP (0x1ul << CNTHCTL_EL1TVT_SHIFT)
+#define CNTHCTL_EL1TVCT_SHIFT 14
+#define CNTHCTL_EL1TVCT_MASK (0x1ul << CNTHCTL_EL1TVCT_SHIFT)
+#define CNTHCTL_EL1TVCT_NOTRAP (0x0ul << CNTHCTL_EL1TVCT_SHIFT)
+#define CNTHCTL_EL1TVCT_TRAP (0x1ul << CNTHCTL_EL1TVCT_SHIFT)
+#define CNTHCTL_EL1NVPCT_SHIFT 15
+#define CNTHCTL_EL1NVPCT_MASK (0x1ul << CNTHCTL_EL1NVPCT_SHIFT)
+#define CNTHCTL_EL1NVPCT_NOTRAP (0x0ul << CNTHCTL_EL1NVPCT_SHIFT)
+#define CNTHCTL_EL1NVPCT_TRAP (0x1ul << CNTHCTL_EL1NVPCT_SHIFT)
+#define CNTHCTL_EL1NVVCT_SHIFT 16
+#define CNTHCTL_EL1NVVCT_MASK (0x1ul << CNTHCTL_EL1NVVCT_SHIFT)
+#define CNTHCTL_EL1NVVCT_NOTRAP (0x0ul << CNTHCTL_EL1NVVCT_SHIFT)
+#define CNTHCTL_EL1NVVCT_TRAP (0x1ul << CNTHCTL_EL1NVVCT_SHIFT)
+#define CNTHCTL_EVNTIS_SHIFT 17
+#define CNTHCTL_EVNTIS_MASK (0x1ul << CNTHCTL_EVNTIS_SHIFT)
+#define CNTHCTL_CNTVMASK_SHIFT 18
+#define CNTHCTL_CNTVMASK_MASK (0x1ul << CNTHCTL_CNTVMASK_SHIFT)
+#define CNTHCTL_CNTPMASK_SHIFT 19
+#define CNTHCTL_CNTPMASK_MASK (0x1ul << CNTHCTL_CNTPMASK_SHIFT)
/* CNTPOFF_EL2 - Counter-timer Physical Offset Register */
#define CNTPOFF_EL2_REG MRS_REG_ALT_NAME(CNTPOFF_EL2)
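
Breaking every CNTHCTL_EL2 field into SHIFT/MASK pairs plus named dispositions (TRAP/NOTRAP, EN/DIS) lets the hypervisor build the register as an OR of readable terms instead of magic numbers. A hedged sketch for the HCR_EL2.E2H == 1 layout (which bits FreeBSD's vmm actually programs is not shown in this diff, and the write must run at EL2):

    static void
    vtimer_cnthctl_example(void)
    {
            uint64_t cnthctl;

            cnthctl = CNTHCTL_E2H_EL1PCTEN_NOTRAP | /* EL1 physical counter */
                CNTHCTL_E2H_EL1PTEN_NOTRAP |        /* EL1 physical timer   */
                CNTHCTL_E2H_EL0PCTEN_NOTRAP |       /* EL0 physical counter */
                CNTHCTL_E2H_EL0VCTEN_NOTRAP |       /* EL0 virtual counter  */
                CNTHCTL_EL1TVT_TRAP;                /* trap EL1 virtual timer */
            WRITE_SPECIALREG(cnthctl_el2, cnthctl);
    }
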
@@ -190,6 +249,54 @@
#define ICC_SRE_EL2_SRE (1UL << 0)
#define ICC_SRE_EL2_EN (1UL << 3)
+/* MDCR_EL2 - Hyp Debug Control Register */
+#define MDCR_EL2_HPMN_MASK 0x1f
+#define MDCR_EL2_HPMN_SHIFT 0
+#define MDCR_EL2_TPMCR_SHIFT 5
+#define MDCR_EL2_TPMCR (0x1UL << MDCR_EL2_TPMCR_SHIFT)
+#define MDCR_EL2_TPM_SHIFT 6
+#define MDCR_EL2_TPM (0x1UL << MDCR_EL2_TPM_SHIFT)
+#define MDCR_EL2_HPME_SHIFT 7
+#define MDCR_EL2_HPME (0x1UL << MDCR_EL2_HPME_SHIFT)
+#define MDCR_EL2_TDE_SHIFT 8
+#define MDCR_EL2_TDE (0x1UL << MDCR_EL2_TDE_SHIFT)
+#define MDCR_EL2_TDA_SHIFT 9
+#define MDCR_EL2_TDA (0x1UL << MDCR_EL2_TDA_SHIFT)
+#define MDCR_EL2_TDOSA_SHIFT 10
+#define MDCR_EL2_TDOSA (0x1UL << MDCR_EL2_TDOSA_SHIFT)
+#define MDCR_EL2_TDRA_SHIFT 11
+#define MDCR_EL2_TDRA (0x1UL << MDCR_EL2_TDRA_SHIFT)
+#define MDCR_EL2_E2PB_SHIFT 12
+#define MDCR_EL2_E2PB_MASK (0x3UL << MDCR_EL2_E2PB_SHIFT)
+#define MDCR_EL2_TPMS_SHIFT 14
+#define MDCR_EL2_TPMS (0x1UL << MDCR_EL2_TPMS_SHIFT)
+#define MDCR_EL2_EnSPM_SHIFT 15
+#define MDCR_EL2_EnSPM (0x1UL << MDCR_EL2_EnSPM_SHIFT)
+#define MDCR_EL2_HPMD_SHIFT 17
+#define MDCR_EL2_HPMD (0x1UL << MDCR_EL2_HPMD_SHIFT)
+#define MDCR_EL2_TTRF_SHIFT 19
+#define MDCR_EL2_TTRF (0x1UL << MDCR_EL2_TTRF_SHIFT)
+#define MDCR_EL2_HCCD_SHIFT 23
+#define MDCR_EL2_HCCD (0x1UL << MDCR_EL2_HCCD_SHIFT)
+#define MDCR_EL2_E2TB_SHIFT 24
+#define MDCR_EL2_E2TB_MASK (0x3UL << MDCR_EL2_E2TB_SHIFT)
+#define MDCR_EL2_HLP_SHIFT 26
+#define MDCR_EL2_HLP (0x1UL << MDCR_EL2_HLP_SHIFT)
+#define MDCR_EL2_TDCC_SHIFT 27
+#define MDCR_EL2_TDCC (0x1UL << MDCR_EL2_TDCC_SHIFT)
+#define MDCR_EL2_MTPME_SHIFT 28
+#define MDCR_EL2_MTPME (0x1UL << MDCR_EL2_MTPME_SHIFT)
+#define MDCR_EL2_HPMFZO_SHIFT 29
+#define MDCR_EL2_HPMFZO (0x1UL << MDCR_EL2_HPMFZO_SHIFT)
+#define MDCR_EL2_PMSSE_SHIFT 30
+#define MDCR_EL2_PMSSE_MASK (0x3UL << MDCR_EL2_PMSSE_SHIFT)
+#define MDCR_EL2_HPMFZS_SHIFT 36
+#define MDCR_EL2_HPMFZS (0x1UL << MDCR_EL2_HPMFZS_SHIFT)
+#define MDCR_EL2_PMEE_SHIFT 40
+#define MDCR_EL2_PMEE_MASK (0x3UL << MDCR_EL2_PMEE_SHIFT)
+#define MDCR_EL2_EBWE_SHIFT 43
+#define MDCR_EL2_EBWE (0x1UL << MDCR_EL2_EBWE_SHIFT)
+
/* SCTLR_EL2 - System Control Register */
#define SCTLR_EL2_RES1 0x30c50830
#define SCTLR_EL2_M_SHIFT 0
@@ -299,52 +406,4 @@
/* Assumed to be 0 by locore.S */
#define VTTBR_HOST 0x0000000000000000
-/* MDCR_EL2 - Hyp Debug Control Register */
-#define MDCR_EL2_HPMN_MASK 0x1f
-#define MDCR_EL2_HPMN_SHIFT 0
-#define MDCR_EL2_TPMCR_SHIFT 5
-#define MDCR_EL2_TPMCR (0x1UL << MDCR_EL2_TPMCR_SHIFT)
-#define MDCR_EL2_TPM_SHIFT 6
-#define MDCR_EL2_TPM (0x1UL << MDCR_EL2_TPM_SHIFT)
-#define MDCR_EL2_HPME_SHIFT 7
-#define MDCR_EL2_HPME (0x1UL << MDCR_EL2_HPME_SHIFT)
-#define MDCR_EL2_TDE_SHIFT 8
-#define MDCR_EL2_TDE (0x1UL << MDCR_EL2_TDE_SHIFT)
-#define MDCR_EL2_TDA_SHIFT 9
-#define MDCR_EL2_TDA (0x1UL << MDCR_EL2_TDA_SHIFT)
-#define MDCR_EL2_TDOSA_SHIFT 10
-#define MDCR_EL2_TDOSA (0x1UL << MDCR_EL2_TDOSA_SHIFT)
-#define MDCR_EL2_TDRA_SHIFT 11
-#define MDCR_EL2_TDRA (0x1UL << MDCR_EL2_TDRA_SHIFT)
-#define MDCR_E2PB_SHIFT 12
-#define MDCR_E2PB_MASK (0x3UL << MDCR_E2PB_SHIFT)
-#define MDCR_TPMS_SHIFT 14
-#define MDCR_TPMS (0x1UL << MDCR_TPMS_SHIFT)
-#define MDCR_EnSPM_SHIFT 15
-#define MDCR_EnSPM (0x1UL << MDCR_EnSPM_SHIFT)
-#define MDCR_HPMD_SHIFT 17
-#define MDCR_HPMD (0x1UL << MDCR_HPMD_SHIFT)
-#define MDCR_TTRF_SHIFT 19
-#define MDCR_TTRF (0x1UL << MDCR_TTRF_SHIFT)
-#define MDCR_HCCD_SHIFT 23
-#define MDCR_HCCD (0x1UL << MDCR_HCCD_SHIFT)
-#define MDCR_E2TB_SHIFT 24
-#define MDCR_E2TB_MASK (0x3UL << MDCR_E2TB_SHIFT)
-#define MDCR_HLP_SHIFT 26
-#define MDCR_HLP (0x1UL << MDCR_HLP_SHIFT)
-#define MDCR_TDCC_SHIFT 27
-#define MDCR_TDCC (0x1UL << MDCR_TDCC_SHIFT)
-#define MDCR_MTPME_SHIFT 28
-#define MDCR_MTPME (0x1UL << MDCR_MTPME_SHIFT)
-#define MDCR_HPMFZO_SHIFT 29
-#define MDCR_HPMFZO (0x1UL << MDCR_HPMFZO_SHIFT)
-#define MDCR_PMSSE_SHIFT 30
-#define MDCR_PMSSE_MASK (0x3UL << MDCR_PMSSE_SHIFT)
-#define MDCR_HPMFZS_SHIFT 36
-#define MDCR_HPMFZS (0x1UL << MDCR_HPMFZS_SHIFT)
-#define MDCR_PMEE_SHIFT 40
-#define MDCR_PMEE_MASK (0x3UL << MDCR_PMEE_SHIFT)
-#define MDCR_EBWE_SHIFT 43
-#define MDCR_EBWE (0x1UL << MDCR_EBWE_SHIFT)
-
#endif /* !_MACHINE_HYPERVISOR_H_ */
diff --git a/sys/arm64/include/kexec.h b/sys/arm64/include/kexec.h
new file mode 100644
index 000000000000..0a8c7a053331
--- /dev/null
+++ b/sys/arm64/include/kexec.h
@@ -0,0 +1,33 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2025 Juniper Networks, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _ARM64_KEXEC_H_
+#define _ARM64_KEXEC_H_
+
+#define KEXEC_MD_PAGES(x) 0
+
+#endif /* _ARM64_KEXEC_H_ */
diff --git a/sys/arm64/include/pcpu.h b/sys/arm64/include/pcpu.h
index 09bd8fa8a966..73399d2c3f8c 100644
--- a/sys/arm64/include/pcpu.h
+++ b/sys/arm64/include/pcpu.h
@@ -50,7 +50,8 @@ struct debug_monitor_state;
struct pmap *pc_curvmpmap; \
uint64_t pc_mpidr; \
u_int pc_bcast_tlbi_workaround; \
- char __pad[197]
+ uint64_t pc_release_addr; \
+ char __pad[189]
#ifdef _KERNEL
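
The new pc_release_addr member is eight bytes, so the trailing pad shrinks by the same amount (197 - sizeof(uint64_t) = 189) and the overall size of the machine-dependent pcpu block stays unchanged.
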
diff --git a/sys/arm64/include/pmap.h b/sys/arm64/include/pmap.h
index 0f23f200f0f6..406b6e2c5e0a 100644
--- a/sys/arm64/include/pmap.h
+++ b/sys/arm64/include/pmap.h
@@ -69,6 +69,7 @@ struct md_page {
TAILQ_HEAD(,pv_entry) pv_list;
int pv_gen;
vm_memattr_t pv_memattr;
+ uint8_t pv_reserve[3];
};
enum pmap_stage {
@@ -174,6 +175,8 @@ int pmap_fault(pmap_t, uint64_t, uint64_t);
struct pcb *pmap_switch(struct thread *);
+void pmap_s1_invalidate_all_kernel(void);
+
extern void (*pmap_clean_stage2_tlbi)(void);
extern void (*pmap_stage2_invalidate_range)(uint64_t, vm_offset_t, vm_offset_t,
bool);
diff --git a/sys/arm64/include/proc.h b/sys/arm64/include/proc.h
index 184743d4cc80..b40990e89385 100644
--- a/sys/arm64/include/proc.h
+++ b/sys/arm64/include/proc.h
@@ -75,6 +75,7 @@ struct mdthread {
struct mdproc {
uint64_t md_tcr; /* TCR_EL1 fields to update */
+ uint64_t md_reserved[2];
};
#endif /* !LOCORE */
diff --git a/sys/arm64/include/smp.h b/sys/arm64/include/smp.h
index 500cd1ef4f02..4a5bfda3ac1c 100644
--- a/sys/arm64/include/smp.h
+++ b/sys/arm64/include/smp.h
@@ -40,6 +40,7 @@ enum {
IPI_STOP,
IPI_STOP_HARD,
IPI_HARDCLOCK,
+ IPI_OFF,
INTR_IPI_COUNT,
};
diff --git a/sys/arm64/include/vmm.h b/sys/arm64/include/vmm.h
index 73b5b4a09591..e67540eac66d 100644
--- a/sys/arm64/include/vmm.h
+++ b/sys/arm64/include/vmm.h
@@ -42,6 +42,7 @@ enum vm_suspend_how {
VM_SUSPEND_RESET,
VM_SUSPEND_POWEROFF,
VM_SUSPEND_HALT,
+ VM_SUSPEND_DESTROY,
VM_SUSPEND_LAST
};
@@ -105,27 +106,6 @@ enum vm_reg_name {
#define VM_GUEST_BASE_IPA 0x80000000UL /* Guest kernel start ipa */
-/*
- * The VM name has to fit into the pathname length constraints of devfs,
- * governed primarily by SPECNAMELEN. The length is the total number of
- * characters in the full path, relative to the mount point and not
- * including any leading '/' characters.
- * A prefix and a suffix are added to the name specified by the user.
- * The prefix is usually "vmm/" or "vmm.io/", but can be a few characters
- * longer for future use.
- * The suffix is a string that identifies a bootrom image or some similar
- * image that is attached to the VM. A separator character gets added to
- * the suffix automatically when generating the full path, so it must be
- * accounted for, reducing the effective length by 1.
- * The effective length of a VM name is 229 bytes for FreeBSD 13 and 37
- * bytes for FreeBSD 12. A minimum length is set for safety and supports
- * a SPECNAMELEN as small as 32 on old systems.
- */
-#define VM_MAX_PREFIXLEN 10
-#define VM_MAX_SUFFIXLEN 15
-#define VM_MAX_NAMELEN \
- (SPECNAMELEN - VM_MAX_PREFIXLEN - VM_MAX_SUFFIXLEN - 1)
-
#ifdef _KERNEL
struct vm;
struct vm_exception;
@@ -142,10 +122,41 @@ struct vm_eventinfo {
int *iptr; /* reqidle cookie */
};
+#define DECLARE_VMMOPS_FUNC(ret_type, opname, args) \
+ ret_type vmmops_##opname args
+
+DECLARE_VMMOPS_FUNC(int, modinit, (int ipinum));
+DECLARE_VMMOPS_FUNC(int, modcleanup, (void));
+DECLARE_VMMOPS_FUNC(void *, init, (struct vm *vm, struct pmap *pmap));
+DECLARE_VMMOPS_FUNC(int, gla2gpa, (void *vcpui, struct vm_guest_paging *paging,
+ uint64_t gla, int prot, uint64_t *gpa, int *is_fault));
+DECLARE_VMMOPS_FUNC(int, run, (void *vcpui, register_t pc, struct pmap *pmap,
+ struct vm_eventinfo *info));
+DECLARE_VMMOPS_FUNC(void, cleanup, (void *vmi));
+DECLARE_VMMOPS_FUNC(void *, vcpu_init, (void *vmi, struct vcpu *vcpu,
+ int vcpu_id));
+DECLARE_VMMOPS_FUNC(void, vcpu_cleanup, (void *vcpui));
+DECLARE_VMMOPS_FUNC(int, exception, (void *vcpui, uint64_t esr, uint64_t far));
+DECLARE_VMMOPS_FUNC(int, getreg, (void *vcpui, int num, uint64_t *retval));
+DECLARE_VMMOPS_FUNC(int, setreg, (void *vcpui, int num, uint64_t val));
+DECLARE_VMMOPS_FUNC(int, getcap, (void *vcpui, int num, int *retval));
+DECLARE_VMMOPS_FUNC(int, setcap, (void *vcpui, int num, int val));
+DECLARE_VMMOPS_FUNC(struct vmspace *, vmspace_alloc, (vm_offset_t min,
+ vm_offset_t max));
+DECLARE_VMMOPS_FUNC(void, vmspace_free, (struct vmspace *vmspace));
+#ifdef notyet
+#ifdef BHYVE_SNAPSHOT
+DECLARE_VMMOPS_FUNC(int, snapshot, (void *vmi, struct vm_snapshot_meta *meta));
+DECLARE_VMMOPS_FUNC(int, vcpu_snapshot, (void *vcpui,
+ struct vm_snapshot_meta *meta));
+DECLARE_VMMOPS_FUNC(int, restore_tsc, (void *vcpui, uint64_t now));
+#endif
+#endif
+
int vm_create(const char *name, struct vm **retvm);
struct vcpu *vm_alloc_vcpu(struct vm *vm, int vcpuid);
void vm_disable_vcpu_creation(struct vm *vm);
-void vm_slock_vcpus(struct vm *vm);
+void vm_lock_vcpus(struct vm *vm);
void vm_unlock_vcpus(struct vm *vm);
void vm_destroy(struct vm *vm);
int vm_reinit(struct vm *vm);
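
DECLARE_VMMOPS_FUNC() only emits prototypes; the vmm backend provides the matching vmmops_* definitions. For example, the getreg line above expands to

    int vmmops_getreg(void *vcpui, int num, uint64_t *retval);

and a backend definition would look roughly like this (the body and helper are invented):

    int
    vmmops_getreg(void *vcpui, int num, uint64_t *retval)
    {
            struct hypctx *hypctx = vcpui;  /* backend-private vCPU state */

            /* A real backend reads register 'num' from the saved context. */
            *retval = hypctx_read_reg(hypctx, num);   /* hypothetical helper */
            return (0);
    }
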
@@ -231,7 +242,6 @@ vcpu_should_yield(struct vcpu *vcpu)
void *vcpu_stats(struct vcpu *vcpu);
void vcpu_notify_event(struct vcpu *vcpu);
-struct vmspace *vm_vmspace(struct vm *vm);
struct vm_mem *vm_mem(struct vm *vm);
enum vm_reg_name vm_segment_name(int seg_encoding);
diff --git a/sys/arm64/include/vmm_dev.h b/sys/arm64/include/vmm_dev.h
index 219f1116c728..289ff0fe1fc9 100644
--- a/sys/arm64/include/vmm_dev.h
+++ b/sys/arm64/include/vmm_dev.h
@@ -31,6 +31,8 @@
#include <machine/vmm.h>
+#include <dev/vmm/vmm_param.h>
+
struct vm_memmap {
vm_paddr_t gpa;
int segid; /* memory segment */